/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Isochronous IXL Compiler.
* The compiler converts the general hardware independent IXL command
* blocks into OpenHCI DMA descriptors.
*/
#include <sys/tnf_probe.h>
/* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
/* invalid opcode */
/*
* maximum number of interrupts permitted for a single context in which
* the context does not advance to the next DMA descriptor. Interrupts are
* triggered by 1) hardware completing a DMA descriptor block which has the
* interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
* interrupt. Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
* returned.
*/
/*
* FULL LIST OF ACCEPTED IXL COMMAND OPCOCDES:
* Receive Only: Transmit Only:
* IXL1394_OP_RECV_PKT_ST IXL1394_OP_SEND_PKT_WHDR_ST
* IXL1394_OP_RECV_PKT IXL1394_OP_SEND_PKT_ST
* IXL1394_OP_RECV_BUF IXL1394_OP_SEND_PKT
* IXL1394_OP_SET_SYNCWAIT IXL1394_OP_SEND_BUF
* IXL1394_OP_SEND_HDR_ONLY
* Receive or Transmit: IXL1394_OP_SEND_NO_PKT
* IXL1394_OP_CALLBACK IXL1394_OP_SET_TAGSYNC
* IXL1394_OP_LABEL IXL1394_OP_SET_SKIPMODE
* IXL1394_OP_JUMP IXL1394_OP_STORE_TIMESTAMP
*/
/*
* hci1394_compile_ixl()
* Top level ixl compiler entry point. Scans ixl and builds openHCI 1.0
* descriptor blocks in dma memory.
*/
int
{
/* Initialize compiler working variables */
/*
* First pass:
* Parse ixl commands, building desc blocks, until end of IXL
* linked list.
*/
/*
* Second pass:
* Resolve all generated descriptor block jump and skip addresses.
* Set interrupt enable in descriptor blocks which have callback
* operations in their execution scope. (Previously store_timesamp
* operations were counted also.) Set interrupt enable in descriptor
* blocks which were introduced by an ixl label command.
*/
if (wv.dma_bld_error == 0) {
}
/* Endup: finalize and cleanup ixl compile, return result */
if (*resultp != 0) {
return (DDI_FAILURE);
} else {
return (DDI_SUCCESS);
}
}
/*
* hci1394_compile_ixl_init()
* Initialize the isoch context structure associated with the IXL
* program, and initialize the temporary working variables structure.
*/
static void
{
ctxtp->dma_last_time = 0;
ctxtp->ixl_exec_depth = 0;
/*
* the context's max_noadv_intrs is set here instead of in isoch init
* because the default is patchable and would only be picked up this way
*/
/* init working variables */
wvp->dma_bld_error = 0;
wvp->xfer_pktlen = 0;
wvp->xfer_bufcnt = 0;
wvp->descriptors = 0;
/* START RECV ONLY SECTION */
wvp->ixl_setsyncwait_cnt = 0;
/* START XMIT ONLY SECTION */
wvp->storevalue_data = 0;
wvp->xmit_pkthdr1 = 0;
wvp->xmit_pkthdr2 = 0;
/* END XMIT ONLY SECTION */
}
/*
* hci1394_compile_ixl_endup()
* This routine is called just before the main hci1394_compile_ixl() exits.
* It checks for errors and performs the appropriate cleanup, or it rolls any
* relevant info from the working variables struct into the context structure
*/
static void
{
int err;
/* error if no descriptor blocks found in ixl & created in dma memory */
"IXL1394_ENO_DATA_PKTS: prog has no data packets");
}
/* if no errors yet, find the first IXL command that's a transfer cmd */
if (wvp->dma_bld_error == 0) {
NULL, &ixl_exec_stp);
/* error if a label<->jump loop, or no xfer */
"IXL1394_ENO_DATA_PKTS: loop or no xfer detected");
}
}
/* Sync all the DMA descriptor buffers */
if (err != DDI_SUCCESS) {
"IXL1394_INTERNAL_ERROR: dma_sync() failed");
break;
}
/* advance to next dma memory descriptor */
}
/*
* If error, cleanup and return. delete all allocated xfer_ctl structs
* and all dma descriptor page memory and its dma memory blocks too.
*/
if (wvp->dma_bld_error != 0) {
return;
}
/* can only get to here if the first ixl transfer command is found */
/* set required processing vars into ctxtp struct */
/*
* the transfer command's compiler private xfer_ctl structure has the
* appropriate bound address
*/
/* compile done */
}
/*
* hci1394_parse_ixl()
* Scan IXL program and build ohci DMA descriptor blocks in dma memory.
*
* reached. Evaluate ixl syntax and build (xmit or recv) descriptor
* blocks. To aid execution time evaluation of current location, enable
* status recording on each descriptor block built.
* On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
*/
static void
{
"");
/* follow ixl links until reach end or find error */
/* set this command as the current ixl command */
/* init compiler controlled values in current ixl command */
ixlcurp->compiler_resv = 0;
((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
/* check if command op failed because it was invalid */
errmsg, "IXL1394_BAD_IXL_OPCODE",
} else {
errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
}
continue;
}
/*
* if ends xfer flag set, finalize current xfer descriptor
* block build
*/
if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
/* finalize any descriptor block build in progress */
if (wvp->dma_bld_error != 0) {
continue;
}
}
/*
* now process based on specific opcode value
*/
switch (ixlopcode) {
case IXL1394_OP_RECV_BUF:
case IXL1394_OP_RECV_BUF_U: {
/*
* In packet-per-buffer mode:
* This ixl command builds a collection of xfer
* recv a packet whose buffer size is pkt_size and
* whose buffer ptr is (pktcur*pkt_size + bufp)
*
* In buffer fill mode:
* This ixl command builds a single xfer descriptor
* block to recv as many packets or parts of packets
* as can fit into the buffer size specified
* (pkt_size is not used).
*/
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
/*
* perform packet-per-buffer checks
* (no checks needed when in buffer fill mode)
*/
/* the packets must use the buffer exactly */
pktcnt = 0;
if (pktsize != 0) {
}
cur_xfer_buf_ixlp->size)) {
"IXL1394_EPKTSIZE_RATIO", tnf_int,
wvp->dma_bld_error =
continue;
}
}
/*
* set buffer pointer & size into first xfer_bufp
* and xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
break;
}
case IXL1394_OP_RECV_PKT_ST:
case IXL1394_OP_RECV_PKT_ST_U: {
/* error if in buffer fill mode */
errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
"RECV_PKT_ST used in BFFILL mode");
continue;
}
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
/*
* set buffer pointer & size into first xfer_bufp
* and xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
break;
}
case IXL1394_OP_RECV_PKT:
case IXL1394_OP_RECV_PKT_U: {
/* error if in buffer fill mode */
errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
"RECV_PKT_ST used in BFFILL mode");
continue;
}
/* error if xfer_state not xfer pkt */
errmsg, "IXL1394_EMISPLACED_RECV: "
"RECV_PKT without RECV_PKT_ST");
continue;
}
/*
* save xfer start cmd ixl ptr in compiler_privatep
* field of this cmd
*/
ixlcurp->compiler_privatep = (void *)
/*
* save pkt index [1-n] in compiler_resv field of
* this cmd
*/
/*
* set buffer pointer & size into next xfer_bufp
* and xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
/*
* set updateable xfer cache flush eval flag if
* updateable opcode
*/
if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
}
break;
}
case IXL1394_OP_SEND_BUF:
case IXL1394_OP_SEND_BUF_U: {
/*
* These send_buf commands build a collection of xmit
* xfer a packet whose buffer size is pkt_size and whose
* buffer pt is (pktcur*pkt_size + bufp). (ptr and size
* are adjusted if they have header form of ixl cmd)
*/
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
/* the packets must use the buffer exactly,else error */
pktcnt = 0;
if (pktsize != 0) {
}
cur_xfer_buf_ixlp->size)) {
continue;
}
/* set buf ptr & size into 1st xfer_bufp & xfer_size */
/* wvp->dma_bld_error is set by above call */
continue;
}
break;
}
case IXL1394_OP_SEND_PKT_ST:
case IXL1394_OP_SEND_PKT_ST_U: {
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
/*
* set buffer pointer & size into first xfer_bufp and
* xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
break;
}
case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
/*
* buffer size must be at least 4 (must include header),
* else error
*/
continue;
}
/*
* set buffer and size(excluding header) into first
* xfer_bufp and xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
break;
}
case IXL1394_OP_SEND_PKT:
case IXL1394_OP_SEND_PKT_U: {
/* error if xfer_state not xfer pkt */
errmsg, "IXL1394_EMISPLACED_SEND: SEND_PKT "
"without SEND_PKT_ST");
continue;
}
/*
* save xfer start cmd ixl ptr in compiler_privatep
* field of this cmd
*/
ixlcurp->compiler_privatep = (void *)
/*
* save pkt index [1-n] in compiler_resv field of this
* cmd
*/
/*
* set buffer pointer & size into next xfer_bufp
* and xfer_size
*/
/* wvp->dma_bld_error is set by above call */
continue;
}
/*
* set updateable xfer cache flush eval flag if
* updateable opcode
*/
if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
}
break;
}
case IXL1394_OP_SEND_HDR_ONLY:
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
break;
case IXL1394_OP_SEND_NO_PKT:
/* set xfer_state for new descriptor block build */
/* set this ixl command as current xferstart command */
break;
case IXL1394_OP_JUMP:
case IXL1394_OP_JUMP_U: {
/*
* verify label indicated by IXL1394_OP_JUMP is
* actually an IXL1394_OP_LABEL or NULL
*/
IXL1394_OP_LABEL)) {
errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
continue;
}
break;
}
case IXL1394_OP_LABEL:
/*
* save current ixl label command for xfer cmd
* finalize processing
*/
/* set initiating label flag to cause cache flush */
break;
case IXL1394_OP_CALLBACK:
case IXL1394_OP_CALLBACK_U:
/*
* these commands are accepted during compile,
* processed during execution (interrupt handling)
* No further processing is needed here.
*/
break;
case IXL1394_OP_SET_SKIPMODE:
/*
* Error if already have a set skipmode cmd for
* this xfer
*/
errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
" duplicate set skipmode", tnf_opaque,
continue;
}
/* save skip mode ixl command and verify skipmode */
continue;
}
/*
* if mode is IXL1394_SKIP_TO_LABEL, verify label
* references an IXL1394_OP_LABEL
*/
IXL1394_OP_LABEL))) {
errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
continue;
}
/*
* set updateable set cmd cache flush eval flag if
* updateable opcode
*/
if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
}
break;
case IXL1394_OP_SET_TAGSYNC:
case IXL1394_OP_SET_TAGSYNC_U:
/*
* is an error if already have a set tag and sync cmd
* for this xfer
*/
errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
" duplicate set tagsync", tnf_opaque,
continue;
}
/* save ixl command containing tag and sync values */
/*
* set updateable set cmd cache flush eval flag if
* updateable opcode
*/
if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
}
break;
case IXL1394_OP_SET_SYNCWAIT:
/*
* count ixl wait-for-sync commands since last
* finalize ignore multiple occurrences for same xfer
* command
*/
break;
default:
/* error - unknown/unimplemented ixl command */
continue;
}
} /* while */
/* finalize any last descriptor block build */
if (wvp->dma_bld_error == 0) {
}
}
/*
* hci1394_finalize_all_xfer_desc()
* Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
*
* Set interrupt enable on first descriptor block associated with current
* xfer IXL command if current IXL xfer was introduced by an IXL label cmnd.
*
* Set interrupt enable on last descriptor block associated with current xfer
* IXL command if any callback ixl commands are found on the execution path
* between the current and the next xfer ixl command. (Previously, this
* applied to store timestamp ixl commands, as well.)
*/
static void
{
int ii;
int err;
/*
* If xmit mode and if default skipmode is skip to label -
* follow exec path starting at default skipmode label until
* find the first ixl xfer command which is to be executed.
* Set its address into default_skipxferp.
*/
if (err == DDI_FAILURE) {
"IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
"for skiplabel default w/no xfers", tnf_opaque,
return;
}
}
/* set first ixl cmd */
/* follow ixl links until reach end or find error */
/* set this command as the current ixl command */
/* get command opcode removing unneeded update flag */
/*
* Scan for next ixl xfer start command (including this one),
* along ixl link path. Once xfer command found, find next IXL
* xfer cmd along execution path and fill in branch address of
* current xfer command. If is composite ixl xfer command, first
* link forward branch dma addresses of each descriptor block in
* composite, until reach final one then set its branch address
* to next execution path xfer found. Next determine skip mode
* and fill in skip address(es) appropriately.
*/
/* skip to next if not xfer start ixl command */
if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
continue;
}
/*
* get xfer_ctl structure and composite repeat count for current
* IXL xfer cmd
*/
/*
* if initiated by an IXL label command, set interrupt enable
* flag into last component of first descriptor block of
* current IXL xfer cmd
*/
hcidescp = (hci1394_desc_t *)
temp |= DESC_INTR_ENBL;
}
/* find next xfer IXL cmd by following execution path */
&callback_cnt, &ixlexecnext);
/* if label<->jump loop detected, return error */
if (err == DDI_FAILURE) {
"IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
continue;
}
/* link current IXL's xfer_ctl to next xfer IXL on exec path */
/*
* if callbacks have been seen during execution path scan,
* set interrupt enable flag into last descriptor of last
* descriptor block of current IXL xfer cmd
*/
if (callback_cnt != 0) {
hcidescp = (hci1394_desc_t *)
acc_hdl =
temp |= DESC_INTR_ENBL;
}
/*
* obtain dma bound addr of next exec path IXL xfer command,
* if any
*/
dma_execnext_addr = 0;
if (ixlexecnext != NULL) {
} else {
/*
* If this is last descriptor (next == NULL), then
* make sure the interrupt bit is enabled. This
* way we can ensure that we are notified when the
* descriptor chain processing has come to an end.
*/
hcidescp = (hci1394_desc_t *)
acc_hdl =
temp |= DESC_INTR_ENBL;
}
/*
* set jump address of final cur IXL xfer cmd to addr next
* IXL xfer cmd
*/
hcidescp = (hci1394_desc_t *)
/*
* if a composite object, forward link initial jump
* dma addresses
*/
hcidescp = (hci1394_desc_t *)
}
/*
* fill in skip address(es) for all descriptor blocks belonging
* to current IXL xfer command; note:skip addresses apply only
* to xmit mode commands
*/
if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
/* first obtain and set skip mode information */
/*
* if skip to label,init dma bound addr to be
* 1st xfer cmd after label
*/
dma_skiplabel_addr = 0;
}
/*
* set skip addrs for each descriptor blk at this
* xfer start IXL cmd
*/
case IXL1394_SKIP_TO_LABEL:
/* set dma bound address - label */
break;
case IXL1394_SKIP_TO_NEXT:
/* set dma bound address - next */
} else {
}
break;
case IXL1394_SKIP_TO_SELF:
/* set dma bound address - self */
break;
case IXL1394_SKIP_TO_STOP:
default:
/* set dma bound address - stop */
dma_skip_addr = 0;
break;
}
/*
* determine address of first descriptor of
* current descriptor block by adjusting addr of
* last descriptor of current descriptor block
*/
hcidescp = ((hci1394_desc_t *)
acc_hdl =
/*
* adjust by count of descriptors in this desc
* block not including the last one (size of
* descriptor)
*/
DESC_Z_MASK) - 1);
/*
* adjust further if the last descriptor is
* double sized
*/
if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
hcidescp++;
}
/*
* now set skip address into first descriptor
* of descriptor block
*/
} /* for */
} /* if */
} /* while */
}
/*
* hci1394_finalize_cur_xfer_desc()
* Build the openHCI descriptor for a packet or buffer based on info
* currently collected into the working vars struct (wvp). After some
* checks, this routine dispatches to the appropriate descriptor block
* build (bld) routine for the packet or buf type.
*/
static void
{
/* extract opcode from current IXL cmd (if any) */
} else {
}
/*
* if no xfer descriptor block being built, perform validity checks
*/
/*
* error if being finalized by IXL1394_OP_LABEL or
* IXL1394_OP_JUMP or if at end, and have an unapplied
* IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
* IXL1394_OP_SET_SYNCWAIT
*/
if ((ixlopraw == IXL1394_OP_JUMP) ||
(ixlopraw == IXL1394_OP_LABEL) ||
(wvp->ixl_setsyncwait_cnt != 0)) {
errmsg, "IXL1394_UNAPPLIED_SET_CMD: "
"orphaned set (no associated packet)",
wvp->ixl_cur_cmdp);
return;
}
}
/* error if finalize is due to updateable jump cmd */
if (ixlopcode == IXL1394_OP_JUMP_U) {
return;
}
/* no error, no xfer */
return;
}
/*
* finalize current xfer descriptor block being built
*/
/* count IXL xfer start command for descriptor block being built */
wvp->ixl_xfer_st_cnt++;
/*
* complete setting of cache flush evaluation flags; flags will already
* have been set by updateable set cmds and non-start xfer pkt cmds
*/
/* now set cache flush flag if current xfer start cmnd is updateable */
}
/*
* also set cache flush flag if xfer being finalized by
* updateable jump cmd
*/
if ((ixlopcode == IXL1394_OP_JUMP_U) != 0) {
}
/*
* Determine if cache flush required before building next descriptor
* block. If xfer pkt command and any cache flush flags are set,
* hci flush needed.
* If buffer or special xfer command and xfer command is updateable or
* an associated set command is updateable, hci flush is required now.
* If a single-xfer buffer or special xfer command is finalized by
* updateable jump command, hci flush is required now.
* Note: a cache flush will be required later, before the last
* descriptor block of a multi-xfer set of descriptor blocks is built,
* if this (non-pkt) xfer is finalized by an updateable jump command.
*/
if (wvp->xfer_hci_flush != 0) {
0)) {
/* wvp->dma_bld_error is set by above call */
return;
}
}
}
/*
* determine which kind of descriptor block to build based on
* xfer state - hdr only, skip cycle, pkt or buf.
*/
switch (wvp->xfer_state) {
case XFER_PKT:
} else {
}
break;
case XFER_BUF:
} else {
}
} else {
}
break;
case XMIT_HDRONLY:
case XMIT_NOPKT:
break;
default:
/* internal compiler error */
"IXL1394_INTERNAL_ERROR: invalid state", tnf_opaque,
}
/* return if error */
if (wvp->dma_bld_error != 0) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* if was finalizing IXL jump cmd, set compiler_privatep to
* cur xfer IXL cmd
*/
if (ixlopraw == IXL1394_OP_JUMP) {
(void *)wvp->ixl_cur_xfer_stp;
}
/* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
((hci1394_xfer_ctl_t *)
}
/*
* set any associated IXL set skipmode cmd into xfer_ctl of
* cur xfer IXL cmd
*/
((hci1394_xfer_ctl_t *)
}
/* set no current xfer start cmd */
/* set no current set tag&sync, set skipmode or set syncwait commands */
wvp->ixl_setsyncwait_cnt = 0;
/* set no currently active descriptor blocks */
wvp->descriptors = 0;
/* reset total packet length and buffers count */
wvp->xfer_pktlen = 0;
wvp->xfer_bufcnt = 0;
/* reset flush cache evaluation flags */
wvp->xfer_hci_flush = 0;
/* set no xmit descriptor block being built */
}
/*
* hci1394_bld_recv_pkt_desc()
* Used to create the openHCI dma descriptor block(s) for a receive packet.
*/
static void
{
/*
* is error if number of descriptors to be built exceeds maximum
* descriptors allowed in a descriptor block.
*/
return;
}
/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
"IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer cmd
*/
/*
* if enabled, set wait for sync flag in first descriptor of
* descriptor block
*/
if (wvp->ixl_setsyncwait_cnt > 0) {
} else {
}
/* create descriptor block for this recv packet (xfer status enabled) */
} else {
}
wvp->descriptors++;
}
/* allocate and copy descriptor block to dma memory */
DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above function call */
return;
}
/*
* set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
* is last component)
*/
}
/*
* hci1394_bld_recv_buf_ppb_desc()
* Used to create the openHCI dma descriptor block(s) for a receive buf
* in packet per buffer mode.
*/
static void
{
/* determine number and size of pkt desc blocks to create */
/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
"IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer cmd
*/
/*
* if enabled, set wait for sync flag in first descriptor in
* descriptor block
*/
if (wvp->ixl_setsyncwait_cnt > 0) {
} else {
}
/* create first descriptor block for this recv packet */
/* consists of one descriptor and xfer status is enabled */
wvp->descriptors++;
/* useful debug trace info - IXL command, and packet count and size */
/*
* generate as many contiguous descriptor blocks as there are
* recv pkts
*/
/* if about to create last descriptor block */
/* check and perform any required hci cache flush */
DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
}
/* allocate and copy descriptor block to dma memory */
&dma_desc_bound) != DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* set dma addrs into xfer_ctl struct (unbound addr (kernel
* virtual) is last component (descriptor))
*/
/* advance buffer ptr by pktsize in descriptor block */
}
}
/*
* hci1394_bld_recv_buf_fill_desc()
* Used to create the openHCI dma descriptor block(s) for a receive buf
* in buffer fill mode.
*/
static void
{
/* allocate an xfer_ctl struct including 1 xfer_ctl_dma structs */
"IXL1394_EMEM_ALLOC_FAIL: xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer cmd
*/
/*
* if enabled, set wait for sync flag in first descriptor of
* descriptor block
*/
if (wvp->ixl_setsyncwait_cnt > 0) {
} else {
}
/*
* create descriptor block for this buffer fill mode recv command which
* consists of one descriptor with xfer status enabled
*/
wvp->descriptors++;
/* check and perform any required hci cache flush */
/* wvp->dma_bld_error is set by above call */
return;
}
/* allocate and copy descriptor block to dma memory */
!= DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
* is last component.
*/
}
/*
* hci1394_bld_xmit_pkt_desc()
* Used to create the openHCI dma descriptor block(s) for a transmit packet.
*/
static void
{
/*
* is error if number of descriptors to be built exceeds maximum
* descriptors allowed in a descriptor block. Add 2 for the overhead
* of the OMORE-Immediate.
*/
return;
}
/* is error if total packet length exceeds 0xFFFF */
wvp->xfer_pktlen);
return;
}
/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
"IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer cmd
*/
/* generate values for the xmit pkt hdrs */
/*
* xmit pkt starts with an output more immediate,
* a double sized hci1394_desc
*/
wv_omi_descp->data_addr = 0;
wv_omi_descp->branch = 0;
wv_omi_descp->status = 0;
wv_omi_descp->q3 = 0;
wv_omi_descp->q4 = 0;
/*
* create the required output more hci1394_desc descriptor, then create
* an output last hci1394_desc descriptor with xfer status enabled
*/
} else {
}
wvp->descriptors++;
}
/* allocate and copy descriptor block to dma memory */
DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
* is last component (descriptor))
*/
}
/*
* hci1394_bld_xmit_buf_desc()
* Used to create the openHCI dma descriptor blocks for a transmit buffer.
*/
static void
{
/* determine number and size of pkt desc blocks to create */
/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
"IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer cmd
*/
/* generate values for the xmit pkt hdrs */
/*
* xmit pkt starts with an output more immediate,
* a double sized hci1394_desc
*/
wv_omi_descp->data_addr = 0;
wv_omi_descp->branch = 0;
wv_omi_descp->status = 0;
wv_omi_descp->q3 = 0;
wv_omi_descp->q4 = 0;
pktsize);
wvp->descriptors++;
/*
* generate as many contiguous descriptor blocks as there are
* xmit packets
*/
/* if about to create last descriptor block */
/* check and perform any required hci cache flush */
DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
}
/* allocate and copy descriptor block to dma memory */
&dma_desc_bound) != DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* set dma addrs into xfer_ctl structure (unbound addr
* (kernel virtual) is last component (descriptor))
*/
sizeof (hci1394_desc_t);
/* advance buffer ptr by pktsize in descriptor block */
}
}
/*
* hci1394_bld_xmit_hdronly_nopkt_desc()
* Used to create the openHCI dma descriptor blocks for transmitting
* a packet consisting of an isochronous header with no data payload,
* or for not sending a packet at all for a cycle.
*
* A Store_Value openhci descriptor is built at the start of each
* IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
* descriptor block (to allow for skip cycle specification and set skipmode
* processing for these commands).
*/
static void
{
/*
* allocate an xfer_ctl structure which includes repcnt
* xfer_ctl_dma structs
*/
"IXL EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
return;
}
/*
* save xfer_ctl struct addr in compiler_privatep of
* current IXL xfer command
*/
/*
* create a storevalue descriptor
* (will be used for skip vs jump processing)
*/
/*
* processing now based on opcode:
* IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
*/
/* for header only, generate values for the xmit pkt hdrs */
/*
* create an output last immediate (double sized) descriptor
* xfer status enabled
*/
wv_oli_descp->data_addr = 0;
wv_oli_descp->branch = 0;
wv_oli_descp->status = 0;
wv_oli_descp->q3 = 0;
wv_oli_descp->q4 = 0;
} else {
/*
* for skip cycle, create a single output last descriptor
* with xfer status enabled
*/
DESC_INTR_DSABL, 0);
wv_ol_descp->data_addr = 0;
wv_ol_descp->branch = 0;
wv_ol_descp->status = 0;
wvp->descriptors++;
}
/*
* generate as many contiguous descriptor blocks as repeat count
* indicates
*/
/* if about to create last descriptor block */
/* check and perform any required hci cache flush */
DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
}
/* allocate and copy descriptor block to dma memory */
&dma_desc_bound) != DDI_SUCCESS) {
/* wvp->dma_bld_error is set by above call */
return;
}
/*
* set dma addrs into xfer_ctl structure (unbound addr
* (kernel virtual) is last component (descriptor)
*/
}
}
/*
* hci1394_bld_dma_mem_desc_blk()
* Used to put a given OpenHCI descriptor block into dma bound memory.
*/
static int
{
/* set internal error if no descriptor blocks to build */
if (wvp->descriptors == 0) {
"IXL1394_INTERNAL_ERROR: no descriptors to build");
return (DDI_FAILURE);
}
/* allocate dma memory and move this descriptor block to it */
sizeof (hci1394_desc_t), &dma_bound);
if (*dma_descpp == NULL) {
"IXL1394_EMEM_ALLOC_FAIL: for descriptors");
return (DDI_FAILURE);
}
#ifdef _KERNEL
#else
#endif
/*
* convert allocated block's memory address to bus address space
* include properly set Z bits (descriptor count).
*/
return (DDI_SUCCESS);
}
/*
* hci1394_set_xmit_pkt_hdr()
* Compose the 2 quadlets for the xmit packet header.
*/
static void
{
/*
* choose tag and sync bits for header either from default values or
* from currently active set tag and sync IXL command
* (clear command after use)
*/
} else {
}
/*
* build xmit pkt header -
* hdr1 has speed, tag, channel number and sync bits
* hdr2 has the packet length.
*/
}
/*
* hci1394_set_xmit_skip_mode()
* Set current skip mode from default or from currently active command.
* If non-default skip mode command's skip mode is skip to label, find
* and set xfer start IXL command which follows skip to label into
* compiler_privatep of set skipmode IXL command.
*/
static void
{
int err;
} else {
if (err == DDI_FAILURE) {
errmsg, "IXL1394_ENO_DATA_PKTS: "
"label<->jump loop detected for skiplabel "
}
}
}
}
/*
* hci1394_set_xmit_storevalue_desc()
* Set up store_value DMA descriptor.
* XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
* descriptor in the descriptor block (to handle skip mode processing)
*/
static void
{
wvp->descriptors++;
}
/*
* hci1394_set_next_xfer_buf()
* This routine adds the data buffer to the current wvp list.
* Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
* contains the error code.
*/
static int
{
/* error if buffer pointer is null (size may be 0) */
return (DDI_FAILURE);
}
/* count new xfer buffer */
wvp->xfer_bufcnt++;
/* error if exceeds maximum xfer buffer components allowed */
wvp->xfer_bufcnt);
return (DDI_FAILURE);
}
/* save xmit buffer and size */
/* accumulate total packet length */
return (DDI_SUCCESS);
}
/*
* hci1394_flush_end_desc_check()
* Check if flush required before last descriptor block of a
* non-unary set generated by an xfer buff or xmit special command
* or a unary set provided no other flush has already been done.
*
* hci flush is required if xfer is finalized by an updateable
* jump command.
*
* Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
* will contain the error code.
*/
static int
{
if ((count != 0) ||
INITIATING_LBL)) == 0)) {
/* wvp->dma_bld_error is set by above call */
return (DDI_FAILURE);
}
}
}
return (DDI_SUCCESS);
}
/*
* hci1394_flush_hci_cache()
* Sun hci controller (RIO) implementation specific processing!
*
* Allocate dma memory for 1 hci descriptor block which will be left unused.
* During execution this will cause a break in the contiguous address space
* processing required by Sun's RIO implementation of the ohci controller and
* will require the controller to refetch the next descriptor block from
* host memory.
*
* General rules for cache flush preceeding a descriptor block in dma memory:
* 1. Current IXL Xfer Command Updateable Rule:
* Cache flush of IXL xfer command is required if it, or any of the
* non-start IXL packet xfer commands associated with it, is flagged
* updateable.
* 2. Next IXL Xfer Command Indeterminate Rule:
* Cache flush of IXL xfer command is required if an IXL jump command
* which is flagged updateable has finalized the current IXL xfer
* command.
* 3. Updateable IXL Set Command Rule:
* Cache flush of an IXL xfer command is required if any of the IXL
* "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
* command (i.e. immediately preceeding it), is flagged updateable.
* 4. Label Initiating Xfer Command Rule:
* Cache flush of IXL xfer command is required if it is initiated by a
* label IXL command. (This is to allow both a flush of the cache and
* an interrupt to be generated easily and in close proximity to each
* other. This can make possible simpler more successful reset of
* descriptor statuses, especially under circumstances where the cycle
* through the span of xfers, etc... This is especially important for
* input where statuses must be reset before execution cycles back
* again.
*
* Application of above rules:
* Packet mode IXL xfer commands:
* If any of the above flush rules apply, flush cache should be done
* immediately preceeding the generation of the dma descriptor block
* for the packet xfer.
* Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
* SEND_HDR_ONLY, and SEND_NO_PKT):
* If Rules #1, #3 or #4 applies, a flush cache should be done
* immediately before the first generated dma descriptor block of the
* non-packet xfer.
* If Rule #2 applies, a flush cache should be done immediately before
* the last generated dma descriptor block of the non-packet xfer.
*
* Note: The flush cache should be done at most once in each location that is
* required to be flushed no matter how many rules apply (i.e. only once
* descriptor block generated). If more than one place requires a flush,
* then both flush operations must be performed. This is determined by
* taking all rules that apply into account.
*
* Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
* will contain the error code.
*/
/*
 * NOTE(review): this definition appears truncated by extraction — the
 * function name, parameter list, local declarations, and the allocation
 * call itself are missing.  From the error string it is the helper that
 * allocates the DMA descriptor used to flush the HCI cache (see the
 * comment block above); confirm all of the notes below against the
 * complete source before relying on them.
 */
static int
{
/* the (missing) allocation result was presumably compared against NULL */
NULL) {
/*
 * allocation failed: record IXL1394_EMEM_ALLOC_FAIL in
 * wvp->dma_bld_error (per the contract documented above) — the
 * statement consuming this string is missing; TODO confirm.
 */
"IXL1394_EMEM_ALLOC_FAIL: for flush_hci_cache");
return (DDI_FAILURE);
}
/* allocation succeeded */
return (DDI_SUCCESS);
}
/*
* hci1394_alloc_storevalue_dma_mem()
* Allocate dma memory for a 1 hci component descriptor block
* which will be used as the dma memory location that ixl
* compiler generated storevalue descriptor commands will
* specify as location to store their data value.
*
* Returns 32-bit bound address of allocated mem, or NULL.
*/
/*
 * NOTE(review): this definition appears truncated by extraction — the
 * function name, parameter list, the allocation call, and the failure
 * test are missing.  Per the comment block above it allocates DMA memory
 * for a one-component descriptor block used as the target of storevalue
 * descriptor commands; confirm against the complete source.
 */
static uint32_t
{
/*
 * allocation failure path: this string is presumably recorded in
 * wvp->dma_bld_error as IXL1394_EMEM_ALLOC_FAIL — the consuming
 * statement is missing; TODO confirm.
 */
"IXL1394_EMEM_ALLOC_FAIL: for storevalue dma",
return (NULL);
}
/* return bound address of allocated memory */
return (dma_bound);
}
/*
* hci1394_alloc_xfer_ctl()
* Allocate an xfer_ctl structure.
*/
/*
 * NOTE(review): this definition appears truncated by extraction — the
 * function name, parameter list (a dmacnt count, judging by the comments),
 * the kmem_zalloc/malloc calls, and the list-link statements are missing.
 * Only the surviving lines are annotated; confirm against the full source.
 */
static hci1394_xfer_ctl_t *
{
/*
 * allocate an xfer_ctl struct which includes dmacnt of
 * xfer_ctl_dma structs
 */
#ifdef _KERNEL
/* kernel build: the (missing) kmem allocation failed */
return (NULL);
}
#else
/*
 * This section makes it possible to easily run and test the compiler in
 * user mode.
 */
/* user build: tail of the (missing) calloc/malloc sized by dmacnt */
sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
return (NULL);
}
#endif
/*
 * set dma structure count into allocated xfer_ctl struct for
 * later deletion.
 */
/* link it to previously allocated xfer_ctl structs or set as first */
} else {
}
/* return allocated xfer_ctl structure */
return (xcsp);
}
/*
* hci1394_alloc_dma_mem()
* Allocates and binds memory for openHCI DMA descriptors as needed.
*/
/*
 * NOTE(review): this definition appears truncated by extraction — the
 * parameter list, several conditionals (the "need more memory" test, the
 * cookie-search loop header, the DDI alloc/bind calls), and the final
 * carve-out of the requested bytes are all missing.  The notes below
 * cover only the surviving lines; confirm against the complete source.
 */
static void *
{
void *dma_mem_ret;
int ret;
/*
 * if no dma has been allocated or current request exceeds
 * remaining memory
 */
#ifdef _KERNEL
/* kernel-mode memory allocation for driver */
/* allocate struct to track more dma descriptor memory */
if ((dma_new = (hci1394_idma_desc_mem_t *)
kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
KM_NOSLEEP)) == NULL) {
return (NULL);
}
/*
 * if more cookies available from the current mem, try to find
 * one of suitable size. Cookies that are too small will be
 * skipped and unused. Given that cookie size is always at least
 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
 * it's a small price to pay for code simplicity.
 */
/* new struct is derived from current */
/* NOTE(review): loop header missing; only the decrement survives */
memp->bi_cookie_count--) {
break;
}
}
}
/* if no luck with current buffer, allocate a new one */
/* NOTE(review): the call that sets `ret` is missing — TODO confirm */
if (ret != DDI_SUCCESS) {
/* free the tracking struct allocated above before failing */
sizeof (hci1394_idma_desc_mem_t));
return (NULL);
}
/* paranoia: this is not supposed to happen */
sizeof (hci1394_idma_desc_mem_t));
return (NULL);
}
}
#else
/* user-mode memory allocation for user mode compiler tests */
/* allocate another dma_desc_mem struct */
if ((dma_new = (hci1394_idma_desc_mem_t *)
return (NULL);
}
/* tail of the (missing) user-mode buffer allocation call */
HCI1394_IXL_PAGESIZE)) == NULL) {
return (NULL);
}
#endif
/* if this is not first dma_desc_mem, link last one to it */
} else {
/* else set it as first one */
}
}
/* now allocate requested memory from current block */
return (dma_mem_ret);
}
/*
* hci1394_is_opcode_valid()
* given an ixl opcode, this routine returns B_TRUE if it is a
* recognized opcode and B_FALSE if it is not recognized.
* Note that the FULL 16 bits of the opcode are checked which includes
* various flags and not just the low order 8 bits of unique code.
*/
/*
 * NOTE(review): the line carrying the function name and its parameter
 * (the 16-bit opcode `ixlopcode` tested below) is missing, apparently
 * dropped by extraction.  Several case labels named in the file's header
 * comment (e.g. SEND_PKT_WHDR_ST, STORE_TIMESTAMP, SET_SKIPMODE_U) do
 * not appear in the switch — presumably lost to the same truncation
 * rather than intentionally rejected; TODO confirm against full source.
 *
 * Returns B_TRUE for a recognized full 16-bit opcode (flags included),
 * B_FALSE otherwise.
 */
static boolean_t
{
/* if it's not one we know about, then it's bad */
switch (ixlopcode) {
case IXL1394_OP_LABEL:
case IXL1394_OP_JUMP:
case IXL1394_OP_CALLBACK:
case IXL1394_OP_RECV_PKT:
case IXL1394_OP_RECV_PKT_ST:
case IXL1394_OP_RECV_BUF:
case IXL1394_OP_SEND_PKT:
case IXL1394_OP_SEND_PKT_ST:
case IXL1394_OP_SEND_BUF:
case IXL1394_OP_SEND_HDR_ONLY:
case IXL1394_OP_SEND_NO_PKT:
case IXL1394_OP_SET_TAGSYNC:
case IXL1394_OP_SET_SKIPMODE:
case IXL1394_OP_SET_SYNCWAIT:
case IXL1394_OP_JUMP_U:
case IXL1394_OP_CALLBACK_U:
case IXL1394_OP_RECV_PKT_U:
case IXL1394_OP_RECV_PKT_ST_U:
case IXL1394_OP_RECV_BUF_U:
case IXL1394_OP_SEND_PKT_U:
case IXL1394_OP_SEND_PKT_ST_U:
case IXL1394_OP_SEND_BUF_U:
case IXL1394_OP_SET_TAGSYNC_U:
/*
 * NOTE(review): this string is an orphaned argument — most likely
 * from a removed TNF_PROBE debug call (the file includes
 * sys/tnf_probe.h); TODO confirm.
 */
"ixl opcode is valid");
return (B_TRUE);
default:
/* not a recognized opcode/flag combination */
return (B_FALSE);
}
}