Lines Matching refs:swqe

343 eib_data_post_tx(eib_vnic_t *vnic, eib_wqe_t *swqe)
372 swqe->qe_nxt_post = NULL;
374 chan->ch_tx_tail->qe_nxt_post = swqe;
376 chan->ch_tx = swqe;
378 chan->ch_tx_tail = swqe;
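A minimal user-land sketch of the tail-append pattern visible in the
eib_data_post_tx() matches above (lines 372-378): the swqe is linked onto
the channel's pending tx chain through its qe_nxt_post pointer, with
ch_tx as the head and ch_tx_tail kept so appends stay O(1). The chan_t
and wqe_t types here are simplified stand-ins for the driver's eib_chan_t
and eib_wqe_t, and the missing locking is an assumption for brevity.

#include <stdio.h>

typedef struct wqe {
	int		qe_id;		/* stand-in payload */
	struct wqe	*qe_nxt_post;	/* next entry in the tx chain */
} wqe_t;

typedef struct chan {
	wqe_t	*ch_tx;		/* head of the pending tx chain */
	wqe_t	*ch_tx_tail;	/* tail pointer for O(1) appends */
} chan_t;

static void
post_tx(chan_t *chan, wqe_t *swqe)
{
	swqe->qe_nxt_post = NULL;
	if (chan->ch_tx != NULL)
		chan->ch_tx_tail->qe_nxt_post = swqe;	/* append to tail */
	else
		chan->ch_tx = swqe;			/* first entry */
	chan->ch_tx_tail = swqe;
}

int
main(void)
{
	chan_t ch = { NULL, NULL };
	wqe_t a = { 1, NULL }, b = { 2, NULL };
	wqe_t *w;

	post_tx(&ch, &a);
	post_tx(&ch, &b);
	for (w = ch.ch_tx; w != NULL; w = w->qe_nxt_post)
		(void) printf("wqe %d\n", w->qe_id);
	return (0);
}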
545 eib_data_prepare_frame(eib_vnic_t *vnic, eib_wqe_t *swqe, mblk_t *mp,
553 * The swqe defaults are set to use the regular ud work request
559 eib_data_setup_lso(swqe, mp, mss, evh);
563 swqe->qe_wr.send.wr_flags |= IBT_WR_SEND_CKSUM;
565 swqe->qe_wr.send.wr_flags &= (~IBT_WR_SEND_CKSUM);
568 if (eib_data_prepare_sgl(vnic, swqe, mp) != 0)
571 swqe->qe_mp = mp;
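The eib_data_prepare_frame() matches (lines 545-571) outline the
per-frame decision: switch the swqe to LSO when an MSS was supplied,
set or clear the hardware-checksum flag on the send work request,
build the SGL, and keep the mblk attached to the swqe until the send
completes. A hedged sketch of that control flow; the types, the
SEND_CKSUM bit and the stub helpers are simplified assumptions, not
the IBTF definitions.

#include <stdint.h>
#include <stddef.h>

#define	SEND_CKSUM	0x1	/* stand-in for IBT_WR_SEND_CKSUM */

typedef struct pkt {		/* stand-in for an mblk_t chain */
	uint8_t		*p_data;
	uint32_t	p_len;
} pkt_t;

typedef struct swqe {
	uint32_t	qe_wr_flags;	/* stand-in for qe_wr.send.wr_flags */
	int		qe_lso;		/* nonzero once set up for LSO */
	pkt_t		*qe_mp;		/* frame held until tx completion */
} swqe_t;

static void
setup_lso(swqe_t *swqe, pkt_t *mp, uint32_t mss)
{
	swqe->qe_lso = 1;		/* details in the next sketch */
	(void) mp;
	(void) mss;
}

static int
prepare_sgl(swqe_t *swqe, pkt_t *mp)
{
	(void) swqe;
	(void) mp;
	return (0);			/* details in the last sketch */
}

static int
prepare_frame(swqe_t *swqe, pkt_t *mp, uint32_t mss, int want_cksum)
{
	/* the swqe defaults assume a regular ud send; LSO changes them */
	if (mss != 0)
		setup_lso(swqe, mp, mss);

	/* set or clear hardware checksum offload for this send */
	if (want_cksum)
		swqe->qe_wr_flags |= SEND_CKSUM;
	else
		swqe->qe_wr_flags &= ~SEND_CKSUM;

	if (prepare_sgl(swqe, mp) != 0)
		return (-1);

	/* keep the frame so it can be freed when the send completes */
	swqe->qe_mp = mp;
	return (0);
}

int
main(void)
{
	pkt_t mp = { NULL, 0 };
	swqe_t wqe = { 0, 0, NULL };

	return (prepare_frame(&wqe, &mp, 0, 1));
}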
764 eib_data_setup_lso(eib_wqe_t *swqe, mblk_t *mp, uint32_t mss,
779 * When the swqe was grabbed, it would've had its wr_opcode and
783 swqe->qe_wr.send.wr_opcode = IBT_WRC_SEND_LSO;
784 lso = &(swqe->qe_wr.send.wr.ud_lso);
785 lso->lso_ud_dest = swqe->qe_dest;
823 lso->lso_hdr = swqe->qe_payload_hdr;
829 * start of wqe->qe_payload_hdr during swqe acquisition. Only
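The eib_data_setup_lso() matches (lines 764-829) show the swqe being
converted from a regular ud send into an LSO send: wr_opcode flips to
IBT_WRC_SEND_LSO, the ud_lso fields are filled from the swqe, and the
packet headers are staged in qe_payload_hdr, skipping the EoIB
encapsulation header that was already written there when the swqe was
grabbed. A hedged stand-alone sketch of that header staging; the struct
layout, the ENCAP_HDR_SZ value and the opcode encoding are simplified
assumptions.

#include <string.h>
#include <stdint.h>

#define	ENCAP_HDR_SZ	4	/* stand-in for EIB_ENCAP_HDR_SZ */

typedef struct lso_wr {		/* stand-in for the wr.ud_lso fields */
	uint32_t	lso_mss;
	uint32_t	lso_hdr_sz;
	uint8_t		*lso_hdr;
} lso_wr_t;

typedef struct swqe {
	int		qe_opcode;		/* 0 = send, 1 = LSO send */
	lso_wr_t	qe_lso;
	uint8_t		qe_payload_hdr[128];	/* encap hdr pre-written here */
} swqe_t;

static void
setup_lso(swqe_t *swqe, const uint8_t *pkt, uint32_t hdr_sz, uint32_t mss)
{
	swqe->qe_opcode = 1;			/* switch to the LSO opcode */
	swqe->qe_lso.lso_mss = mss;
	swqe->qe_lso.lso_hdr = swqe->qe_payload_hdr;
	swqe->qe_lso.lso_hdr_sz = hdr_sz;

	/*
	 * The encapsulation header was written at the start of
	 * qe_payload_hdr when the swqe was grabbed, so copy only the
	 * headers that follow it from the packet.
	 */
	memcpy(swqe->qe_payload_hdr + ENCAP_HDR_SZ, pkt + ENCAP_HDR_SZ,
	    hdr_sz - ENCAP_HDR_SZ);
}

int
main(void)
{
	swqe_t wqe;
	uint8_t pkt[64] = { 0 };

	memset(&wqe, 0, sizeof (wqe));
	setup_lso(&wqe, pkt, 54 + ENCAP_HDR_SZ, 1448);
	return (wqe.qe_opcode == 1 ? 0 : 1);
}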
849 eib_data_prepare_sgl(eib_vnic_t *vnic, eib_wqe_t *swqe, mblk_t *mp)
872 * the lso header size in the swqe includes the EoIB encapsulation
876 lsohdr_sz = (swqe->qe_wr.send.wr_opcode == IBT_WRC_SEND) ? 0 :
877 swqe->qe_wr.send.wr.ud_lso.lso_hdr_sz;
935 iov_arr[i].iov_addr = (caddr_t)swqe->qe_payload_hdr;
947 swqe->qe_info |= EIB_WQE_FLG_BUFTYPE_MAPPED;
948 swqe->qe_wr.send.wr_sgl = swqe->qe_big_sgl;
951 &swqe->qe_wr, &swqe->qe_iov_hdl);
964 if (pktsz <= swqe->qe_bufsz) {
965 swqe->qe_wr.send.wr_nds = 1;
966 swqe->qe_wr.send.wr_sgl = &swqe->qe_sgl;
967 swqe->qe_sgl.ds_len = pktsz;
979 bufp = (uchar_t *)(uintptr_t)swqe->qe_sgl.ds_va;
998 swqe->qe_sgl.ds_len = ETHERMIN + EIB_ENCAP_HDR_SZ;
1004 * Copy path for transfers greater than swqe->qe_bufsz
1006 swqe->qe_wr.send.wr_sgl = swqe->qe_big_sgl;
1007 if (eib_rsrc_grab_lsobufs(ss, pktsz, swqe->qe_wr.send.wr_sgl,
1008 &(swqe->qe_wr.send.wr_nds)) != EIB_E_SUCCESS) {
1013 swqe->qe_info |= EIB_WQE_FLG_BUFTYPE_LSO;
1022 for (i = 0; i < swqe->qe_wr.send.wr_nds; i++) {
1023 sgl = swqe->qe_wr.send.wr_sgl + i;
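The eib_data_prepare_sgl() matches (lines 849-1023) cover three ways the
frame data reaches the wire: bind the mblk with ibt_map_mem_iov() and
point the work request at the mapped SGL (EIB_WQE_FLG_BUFTYPE_MAPPED),
copy small frames into the swqe's own buffer as a single SGE while
padding runts up to ETHERMIN + EIB_ENCAP_HDR_SZ, or borrow LSO buffers
and copy across several SGEs (EIB_WQE_FLG_BUFTYPE_LSO). A simplified,
hedged sketch of that path selection; the buffer sizes, the can_map test
and the type names are assumptions, and the mapped and LSO-copy paths
are reduced to markers.

#include <stdint.h>
#include <string.h>

#define	MIN_FRAME_SZ	64	/* stand-in for ETHERMIN + EIB_ENCAP_HDR_SZ */
#define	OWN_BUF_SZ	2048	/* stand-in for the swqe's qe_bufsz */

enum buftype { BUF_OWN, BUF_MAPPED, BUF_LSO };

typedef struct sge {
	uint8_t		*ds_va;
	uint32_t	ds_len;
} sge_t;

typedef struct swqe {
	enum buftype	qe_buftype;	/* which data path this swqe took */
	uint8_t		qe_buf[OWN_BUF_SZ];	/* the swqe's own buffer */
	sge_t		qe_sgl;		/* the single built-in SGE */
	uint32_t	qe_nds;		/* SGEs in use */
} swqe_t;

static int
prepare_sgl(swqe_t *swqe, const uint8_t *pkt, uint32_t pktsz, int can_map)
{
	if (can_map) {
		/*
		 * Mapped path: the driver binds the mblk with
		 * ibt_map_mem_iov() and points wr_sgl at qe_big_sgl;
		 * only the marker is kept in this sketch.
		 */
		swqe->qe_buftype = BUF_MAPPED;
		return (0);
	}

	if (pktsz <= OWN_BUF_SZ) {
		/* Copy path: one SGE out of the swqe's own buffer. */
		memcpy(swqe->qe_buf, pkt, pktsz);
		swqe->qe_sgl.ds_va = swqe->qe_buf;
		swqe->qe_sgl.ds_len = pktsz;
		swqe->qe_nds = 1;

		/* Runt frames get zero-padded up to the minimum size. */
		if (pktsz < MIN_FRAME_SZ) {
			memset(swqe->qe_buf + pktsz, 0, MIN_FRAME_SZ - pktsz);
			swqe->qe_sgl.ds_len = MIN_FRAME_SZ;
		}
		swqe->qe_buftype = BUF_OWN;
		return (0);
	}

	/*
	 * Large copy path: the driver grabs LSO buffers with
	 * eib_rsrc_grab_lsobufs() and copies the frame across the
	 * qe_big_sgl entries; omitted here.
	 */
	swqe->qe_buftype = BUF_LSO;
	return (0);
}

int
main(void)
{
	static swqe_t wqe;
	uint8_t pkt[40] = { 0 };

	return (prepare_sgl(&wqe, pkt, sizeof (pkt), 0));
}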
1357 * swqe to the pool.