/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/
oce_mq.c
    42  struct oce_mq_cqe *cqe = NULL; local
    50  /* do while we do not reach a cqe that is not valid */
    55  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
    56  while (cqe->u0.dw[3]) {
    57  DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
    58  if (cqe->u0.s.async_event) {
    59  acqe = (struct oce_async_cqe_link_state *)cqe;
    76  cqe->u0.dw[3] = 0;
    78  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
   100  struct oce_mq_cqe *cqe local
   [all...]
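Note: the oce_mq.c hits show this driver's mailbox-queue completion discipline: the fourth doubleword of the CQE doubles as a valid flag, so the consumer loops until it reads an entry whose dw[3] is zero, clearing dw[3] as it consumes each one. A minimal standalone sketch of that loop follows; only the dw[3] convention comes from the source, the simplified ring type and mq_drain() are illustrative assumptions.

    #include <stdint.h>

    struct mq_cqe {
        uint32_t dw[4];            /* dw[3] != 0 marks a valid, unconsumed entry */
    };

    struct mq_cq_ring {            /* assumed stand-in for the driver's ring */
        struct mq_cqe *entries;
        uint32_t cidx;             /* consumer index */
        uint32_t num_entries;
    };

    static int
    mq_drain(struct mq_cq_ring *ring)
    {
        int consumed = 0;
        struct mq_cqe *cqe = &ring->entries[ring->cidx];

        while (cqe->dw[3] != 0) {          /* stop at the first invalid entry */
            /* the real driver byte-swaps (DW_SWAP) and handles async events here */
            cqe->dw[3] = 0;                /* mark consumed for the next pass */
            ring->cidx = (ring->cidx + 1) % ring->num_entries;
            cqe = &ring->entries[ring->cidx];
            consumed++;
        }
        return (consumed);                 /* caller re-arms the CQ doorbell */
    }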
oce_rx.c
    37  struct oce_nic_rx_cqe *cqe);
    39  struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
    42  static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
    44  struct oce_nic_rx_cqe *cqe);
   328  * cqe - Pointer to Completion Q entry
   333  oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) argument
   346  frag_cnt = cqe->u0.s.num_fragments & 0x7;
   354  pkt_len = cqe->u0.s.pkt_size;
   393  oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) argument
   410  pkt_len = cqe
   440  oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe) argument
   472  oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) argument
   496  struct oce_nic_rx_cqe *cqe; local
   617  struct oce_nic_rx_cqe *cqe; local
   [all...]
oce_tx.c
   558  struct oce_nic_tx_cqe *cqe; local
   571  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
   572  while (WQ_CQE_VALID(cqe)) {
   574  DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));
   577  if (cqe->u0.s.status != 0) {
   588  /* clear the valid bit and progress cqe */
   589  WQ_CQE_INVALIDATE(cqe);
   591  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
   619  /* do while we do not reach a cqe that is not valid */
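Note: both oce loops call DW_SWAP() on each valid entry because the hardware writes CQEs big-endian. A sketch of what such a helper has to do, written as a standalone function rather than the driver's macro (the name and signature are assumptions; on a big-endian host the real macro would presumably compile away):

    #include <stddef.h>
    #include <stdint.h>

    /* byte-swap each 32-bit word of a CQE in place */
    static void
    dw_swap(uint32_t *p, size_t len_bytes)
    {
        size_t i;

        for (i = 0; i < len_bytes / sizeof (uint32_t); i++) {
            uint32_t v = p[i];
            p[i] = (v >> 24) | ((v >> 8) & 0x0000ff00U) |
                ((v << 8) & 0x00ff0000U) | (v << 24);
        }
    }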
/illumos-gate/usr/src/lib/udapl/udapl_tavor/tavor/
dapl_tavor_hw.h
   177  #define TAVOR_CQE_QPNUM_GET(cqe) \
   178  ((BETOH_32(((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
   180  #define TAVOR_CQE_DQPN_GET(cqe) \
   181  ((BETOH_32(((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
   183  #define TAVOR_CQE_SL_GET(cqe) \
   184  ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
   186  #define TAVOR_CQE_GRH_GET(cqe) \
   187  ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
   189  #define TAVOR_CQE_PATHBITS_GET(cqe) \
   190  ((BETOH_32(((uint32_t *)(cqe))[
   [all...]
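Note: each of these Tavor accessors is the same move: index the CQE as big-endian 32-bit words, convert one word to host order, then mask and shift a field out. A generic sketch of the pattern with an invented example field (the mask and shift values are illustrative, not the real Tavor layout):

    #include <stdint.h>
    #include <arpa/inet.h>              /* ntohl() stands in for BETOH_32 */

    #define CQE_FIELD_GET(cqe, word, mask, shift) \
        ((ntohl(((const uint32_t *)(cqe))[(word)]) & (mask)) >> (shift))

    /* hypothetical field: a QP number in bits 31:8 of word 0 */
    #define EXAMPLE_QPNUM_GET(cqe)  CQE_FIELD_GET((cqe), 0, 0xffffff00U, 8)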
dapl_tavor_hw.c
   786  tavor_hw_cqe_t *cqe; local
   804  cqe = &cq->cq_addr[cons_indx];
   811  while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
   812  opcode = TAVOR_CQE_OPCODE_GET(cqe);
   817  TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);
   828  cqe = &cq->cq_addr[cons_indx];
   843  tavor_hw_cqe_t *cqe; local
   861  cqe = &cq->cq_addr[cons_indx];
   874  while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
   875  status = dapli_tavor_cq_cqe_consume(cq, cqe,
   943  tavor_hw_cqe_t *cqe; local
   991  dapli_tavor_cq_cqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1096  dapli_tavor_cq_errcqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1721  tavor_hw_cqe_t *cqe; local
   [all...]
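Note: the two polling loops in this file share the Tavor ownership discipline: an entry at the consumer index belongs to software while its owner bit says so, and once consumed, software hands the slot back to hardware and advances. A condensed sketch; the bit position and polarity are assumptions, since TAVOR_CQE_OWNER_IS_SW hides the real test:

    #include <stdint.h>

    typedef struct hw_cqe {
        uint8_t raw[32];               /* assumed 32-byte CQE */
    } hw_cqe_t;

    #define CQE_OWNER_IS_SW(cqe)   (((cqe)->raw[31] >> 7) == 0)   /* assumed */
    #define CQE_OWNER_SET_HW(cqe)  ((cqe)->raw[31] |= 0x80)

    static int
    cq_poll_all(hw_cqe_t *cq_addr, uint32_t *cons_indx, uint32_t cq_size)
    {
        int n = 0;
        hw_cqe_t *cqe = &cq_addr[*cons_indx];

        while (CQE_OWNER_IS_SW(cqe)) {
            /* ... decode the opcode, fill in an ibt_wc_t-style completion ... */
            CQE_OWNER_SET_HW(cqe);             /* return the slot to hardware */
            *cons_indx = (*cons_indx + 1) % cq_size;
            cqe = &cq_addr[*cons_indx];
            n++;
        }
        return (n);
    }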
dapl_arbel_hw.c
   554  tavor_hw_cqe_t *cqe; local
   572  cqe = &cq->cq_addr[cons_indx];
   579  while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
   580  opcode = TAVOR_CQE_OPCODE_GET(cqe);
   585  TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);
   596  cqe = &cq->cq_addr[cons_indx];
   614  tavor_hw_cqe_t *cqe; local
   631  cqe = &cq->cq_addr[cons_indx];
   644  while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
   645  status = dapli_arbel_cq_cqe_consume(cq, cqe,
   706  tavor_hw_cqe_t *cqe; local
   752  dapli_arbel_cq_cqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
   861  dapli_arbel_cq_errcqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1388  tavor_hw_cqe_t *cqe; local
   [all...]
dapl_hermon_hw.c
    42  #define HERMON_CQE_OPCODE_GET(cqe) (((uint8_t *)cqe)[31] & 0x1F)
    43  #define HERMON_CQE_SENDRECV_GET(cqe) (((uint8_t *)cqe)[31] & 0x40)
    44  #define HERMON_CQE_OWNER_IS_SW(cq, cqe) ((((uint8_t *)cqe)[31] >> 7) == \
   531  uint32_t *cqe; local
   542  cqe = (uint32_t *)&cq->cq_addr[cons_indx];
   549  while (HERMON_CQE_OWNER_IS_SW(cq, cqe)) {
   550  opcode = HERMON_CQE_OPCODE_GET(cqe);
   618  uint32_t *cqe; local
   709  uint32_t *cqe; local
   759  dapli_hermon_cq_cqe_consume(ib_cq_handle_t cqhdl, uint32_t *cqe, ibt_wc_t *wc) argument
   862  dapli_hermon_cq_errcqe_consume(ib_cq_handle_t cqhdl, uint32_t *cqe, ibt_wc_t *wc) argument
  1443  tavor_hw_cqe_t *cqe; local
   [all...]
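Note: the three defines at the top of this file show how much Hermon packs into byte 31 of a CQE: the opcode in bits 4:0, a send/receive flag in bit 6, and the owner bit in bit 7 (the truncated macro compares it against the CQ's current phase). A small decoder over that single byte; the struct is an assumption for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    struct cqe_flags {
        uint8_t opcode;                /* bits 4:0 */
        bool    is_send;               /* bit 6 */
        uint8_t owner;                 /* bit 7, compared against the CQ phase */
    };

    static struct cqe_flags
    decode_cqe_byte31(const uint8_t *cqe)
    {
        struct cqe_flags f;

        f.opcode  = cqe[31] & 0x1f;
        f.is_send = (cqe[31] & 0x40) != 0;
        f.owner   = cqe[31] >> 7;
        return (f);
    }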
dapl_tavor_wr.c
    59  dapls_tavor_wrid_get_entry(ib_cq_handle_t cq, tavor_hw_cqe_t *cqe, argument
    71  qpnum = TAVOR_CQE_QPNUM_GET(cqe);
    87  wre_tmp = dapli_tavor_wrid_find_match(wq, cqe);
   124  dapli_tavor_wrid_find_match(dapls_tavor_workq_hdr_t *wq, tavor_hw_cqe_t *cqe) argument
   135  wqeaddr_size = TAVOR_CQE_WQEADDRSZ_GET(cqe);
   161  curr = dapli_tavor_wrid_find_match_srq(container, cqe);
   257  tavor_hw_cqe_t *cqe)
   266  wqe_addr = TAVOR_CQE_WQEADDRSZ_GET(cqe) & 0xFFFFFFC0;
   256  dapli_tavor_wrid_find_match_srq(dapls_tavor_wrid_list_hdr_t *wl, tavor_hw_cqe_t *cqe) argument
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/
lm_recv.c
   338  * @param cqe
   350  IN const struct eth_end_agg_rx_cqe* cqe,
   359  u32_t sge_size = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;
   372  DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size);
   380  pkt->l2pkt_rx_info->total_packet_size = mm_le16_to_cpu(cqe->pkt_len);
   381  pkt->l2pkt_rx_info->coal_seg_cnt = mm_le16_to_cpu(cqe->num_of_coalesced_segs);
   382  pkt->l2pkt_rx_info->dup_ack_cnt = cqe->pure_ack_count;
   383  pkt->l2pkt_rx_info->ts_delta = mm_le32_to_cpu(cqe->timestamp_delta);
   392  ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));
   393  DbgBreakIf(ARRSIZE(cqe
   348  lm_tpa_stop( IN lm_device_t* pdev, INOUT s_list_t* rcvd_list, IN const struct eth_end_agg_rx_cqe* cqe, IN const u32_t chain_idx, IN u32_t pkt_cnt, IN const u8_t queue_index) argument
   501  lm_tpa_start_flags_handle( IN lm_device_t* pdev, IN const struct eth_fast_path_rx_cqe* cqe, INOUT lm_packet_t* pkt, IN const u16_t parse_flags) argument
   569  lm_regular_flags_handle( IN lm_device_t* pdev, IN const struct eth_fast_path_rx_cqe* cqe, INOUT lm_packet_t* pkt, IN const u16_t parse_flags) argument
   933  union eth_rx_cqe* cqe = NULL; local
   [all...]
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l4/ |
lm_l4fp.c
   263  * returns the next cqe in the cqe_buffer and updates the buffer params
   267  char * cqe; local
   269  cqe = cqe_buffer->head;
   271  if(cqe == cqe_buffer->last) {
   274  cqe_buffer->head = cqe + cqe_buffer->qe_size;
   280  return cqe;
   284  * returns the next occupied cqe in the cqe_buffer and updates the buffer params
   289  char * cqe; local
   291  cqe = cqe_buffer->tail;
   293  if ((cqe
   316  char * cqe; local
   [all...]
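Note: this cqe_buffer is walked byte-wise: head and tail are char pointers that step by qe_size and wrap from the last entry back to the first. A sketch of the walker implied by the hits; field names beyond those visible above (first, last, head, qe_size) are assumptions:

    #include <stddef.h>

    struct cqe_buffer {
        char  *first;                  /* first entry in the buffer */
        char  *last;                   /* last entry in the buffer */
        char  *head;                   /* next entry to hand out */
        size_t qe_size;                /* size of one queue element in bytes */
    };

    /* returns the next cqe and advances head, wrapping at the end */
    static char *
    cqe_buffer_next(struct cqe_buffer *b)
    {
        char *cqe = b->head;

        if (cqe == b->last)
            b->head = b->first;        /* wrap around */
        else
            b->head = cqe + b->qe_size;
        return (cqe);
    }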
lm_l4fp.h
    43  * returns the next cqe in the cqe_buffer and updates the buffer params
    49  * returns the next occupied cqe in the cqe_buffer and updates the buffer params
    64  * written cqe
    69  * processes a single rx cqe
    74  struct toe_rx_cqe * cqe,
    79  * processes a single tx cqe
    84  struct toe_tx_cqe * cqe,
lm_l4tx.c
   371  struct toe_tx_cqe * cqe,
   377  /* get the cmd from cqe */
   378  cmd = ((cqe->params & TOE_TX_CQE_COMPLETION_OPCODE) >> TOE_TX_CQE_COMPLETION_OPCODE_SHIFT);
   382  /* Check that the cqe len make sense, we could have got here by chance... */
   383  DbgBreakIfAll(cqe->len & 0xc0000000); /* two upper bits on show a completion larger than 1GB - a bit odd...*/
   386  * fast-path part (nbytes completed) which will be handled in any case that cqe->len > 0 */
   389  if (cqe->len &&
   394  on RST recv cqe and do so only later on one of the following ramrod completions,
   395  we need to ignore this too late completed bytes thus we nullify cqe->len */
   400  lm_tcp_tx_inc_trm_aborted_bytes(pdev, tcp, cqe
   369  lm_tcp_tx_process_cqe( lm_device_t * pdev, struct toe_tx_cqe * cqe, lm_tcp_state_t * tcp ) argument
   455  struct toe_tx_cqe *cqe, *hist_cqe; local
   [all...]
lm_l4rx.c
  1179  * processes a single cqe.
  1183  struct toe_rx_cqe * cqe,
  1191  cmd = ((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT);
  1194  /* Check that the cqe nbytes make sense, we could have got here by chance... */
  1197  nbytes = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_NBYTES) >> TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT;
  1198  isle_num = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_ISLE_NUM) >> TOE_RX_CQE_OOO_PARAMS_ISLE_NUM_SHIFT;
  1204  nbytes = cqe->data.raw_data;
  1206  nbytes = (cqe->data.in_order_params.in_order_params & TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES) >> TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES_SHIFT;
  1221  //DbgMessage(pdev, WARN, "GenericAdd cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
  1254  //DbgMessage(pdev, WARN, "GenericRelease cid=%d nbytes=%d!\n", tcp->cid, cqe
  1181  lm_tcp_rx_process_cqe( lm_device_t * pdev, struct toe_rx_cqe * cqe, lm_tcp_state_t * tcp, u8_t sb_idx) argument
  1427  struct toe_rx_cqe *cqe, *hist_cqe; local
   [all...]
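Note: lm_tcp_rx_process_cqe() pulls the opcode out of a masked bitfield in params1 and then reinterprets the same data words according to that opcode: out-of-order completions carry nbytes plus an isle number, in-order ones carry nbytes alone, and ramrod-style ones use raw_data. A schematic of that layout; the field widths and mask values here are invented, only the shape follows the hits:

    #include <stdint.h>

    #define RX_CQE_OPCODE_MASK   0x00f00000U   /* invented values */
    #define RX_CQE_OPCODE_SHIFT  20

    struct example_rx_cqe {
        uint32_t params1;              /* holds the completion opcode */
        union {
            uint32_t raw_data;         /* ramrod completions */
            uint32_t ooo_params;       /* nbytes and isle_num, each masked out */
            uint32_t in_order_params;  /* nbytes */
        } data;
    };

    static uint32_t
    rx_cqe_opcode(const struct example_rx_cqe *cqe)
    {
        return ((cqe->params1 & RX_CQE_OPCODE_MASK) >> RX_CQE_OPCODE_SHIFT);
    }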
/illumos-gate/usr/src/uts/common/io/ib/adapters/hermon/
hermon_cq.c
    58  hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
    60  hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
   749  * in the cq_hdl, and setting up for the next cqe polling
   870  hermon_hw_cqe_t *cqe; local
   899  cqe = &cq->cq_buf[cons_indx & wrap_around_mask];
   912  while (HERMON_CQE_OWNER_IS_SW(cq, cqe, cons_indx, shift, mask)) {
   915  opcode = HERMON_CQE_OPCODE_GET(cq, cqe);
   926  cqe = &cq->cq_buf[cons_indx & wrap_around_mask];
   936  hermon_cq_cqe_consume(state, cq, cqe, &wc_p[polled_cnt++]);
   942  cqe
  1291  hermon_cq_cqe_consume(hermon_state_t *state, hermon_cqhdl_t cq, hermon_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1476  hermon_cq_errcqe_consume(hermon_state_t *state, hermon_cqhdl_t cq, hermon_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1650  hermon_hw_cqe_t *cqe, *next_cqe; local
   [all...]
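Note: unlike the Tavor loop, Hermon judges ownership by phase: the CQ is a power-of-two ring indexed through wrap_around_mask, and an entry's owner bit is compared against the phase implied by the consumer index, which flips on every wrap. Software never writes the bit back; it just advances cons_indx. A simplified sketch under an assumed field layout and polarity:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct phase_cqe {
        uint8_t raw[32];               /* assumed 32-byte CQE */
    } phase_cqe_t;

    static bool
    cqe_owner_is_sw(const phase_cqe_t *cqe, uint32_t cons_indx, uint32_t num_cqe)
    {
        /* num_cqe is a power of two, so this bit flips once per wrap */
        uint8_t phase = (cons_indx & num_cqe) ? 1 : 0;
        return (((cqe->raw[31] >> 7) & 1) == phase);   /* assumed polarity */
    }

    static int
    cq_poll(phase_cqe_t *cq_buf, uint32_t *cons_indx, uint32_t num_cqe)
    {
        uint32_t wrap_around_mask = num_cqe - 1;
        int n = 0;
        phase_cqe_t *cqe = &cq_buf[*cons_indx & wrap_around_mask];

        while (cqe_owner_is_sw(cqe, *cons_indx, num_cqe)) {
            /* ... consume into a work completion; no owner write-back needed ... */
            (*cons_indx)++;
            cqe = &cq_buf[*cons_indx & wrap_around_mask];
            n++;
        }
        return (n);
    }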
/illumos-gate/usr/src/uts/common/io/ib/adapters/tavor/
tavor_cq.c
    50  tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
    52  tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
    53  static void tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,
   910  tavor_hw_cqe_t *cqe; local
   941  cqe = &cq->cq_buf[cons_indx];
   944  tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORCPU);
   957  while (TAVOR_CQE_OWNER_IS_SW(cq, cqe)) {
   958  status = tavor_cq_cqe_consume(state, cq, cqe,
   962  TAVOR_CQE_OWNER_SET_HW(cq, cqe);
   965  tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORDE
  1300  tavor_cq_cqe_consume(tavor_state_t *state, tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1472  tavor_cq_errcqe_consume(tavor_state_t *state, tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe, ibt_wc_t *wc) argument
  1636  tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe, uint_t flag) argument
  1736  tavor_hw_cqe_t *cqe; local
   [all...]
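Note: tavor_cq.c brackets each CQE access with tavor_cqe_sync(): sync for the CPU before reading an entry, consume it, then sync back for the device after the owner bit is returned to hardware. The DDI call underneath is ddi_dma_sync(); a stripped-down sketch of such a helper, with the handle and offset bookkeeping assumed:

    #include <sys/types.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static void
    cqe_sync(ddi_dma_handle_t dmahdl, off_t cqe_offset, size_t cqe_size,
        uint_t flag)
    {
        /* flag: DDI_DMA_SYNC_FORCPU before reads, DDI_DMA_SYNC_FORDEV after writes */
        (void) ddi_dma_sync(dmahdl, cqe_offset, cqe_size, flag);
    }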
/illumos-gate/usr/src/uts/common/sys/ib/adapters/hermon/
hermon_wr.h
    55  #define HERMON_CQE_WQEADDRSZ_GET(cq, cqe) \
    56  ((uint32_t)((((uint8_t *)(cqe))[0x18]) << 8) | ((uint8_t *)(cqe))[0x19])
   186  ibt_wrid_t hermon_wrid_get_entry(hermon_cqhdl_t cqhdl, hermon_hw_cqe_t *cqe);
hermon_hw.h
  2650  #define HERMON_CQE_IPOK 0x10 /* byte 0x10 in cqe */
  2653  #define HERMON_CQE_IS_IPOK(cq, cqe) \
  2654  (((uint8_t *)(cqe))[HERMON_CQE_IPOK] & HERMON_CQE_IPOK_BIT)
  2656  #define HERMON_CQE_CKSUM(cq, cqe) \
  2657  ((((uint8_t *)(cqe))[HERMON_CQE_CKSUM_15_8] << 8) | \
  2658  (((uint8_t *)(cqe))[HERMON_CQE_CKSUM_7_0]))
  2660  #define HERMON_CQE_IPOIB_STATUS(cq, cqe) \
  2661  htonl((((uint32_t *)(cqe)))[4])
  2663  #define HERMON_CQE_QPNUM_GET(cq, cqe) \
  2664  ((htonl((((uint32_t *)(cqe)))[
   [all...]
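Note: HERMON_CQE_CKSUM (and the WQEADDRSZ macro in hermon_wr.h above) assembles a 16-bit field from two adjacent bytes rather than loading a halfword, which keeps the read endian-neutral: the same shifts produce the big-endian value on any host. The idiom in isolation:

    #include <stdint.h>

    static uint16_t
    be16_from_bytes(const uint8_t *p)  /* p points at the high byte */
    {
        return ((uint16_t)((p[0] << 8) | p[1]));
    }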
/illumos-gate/usr/src/lib/udapl/udapl_tavor/common/
dapl_evd_util.c
    55  IN ib_work_completion_t cqe);
   405  * cqe
   417  IN ib_work_completion_t cqe)
   433  dto_cookie = (DAPL_COOKIE *) (uintptr_t)DAPL_GET_CQE_WRID(&cqe);
   440  "\t\t work_req_id 0x%llx\n", DAPL_GET_CQE_WRID(&cqe));
   442  "\t\t op_type: %s\n", optable[DAPL_GET_CQE_OPTYPE(&cqe)]);
   443  if ((DAPL_GET_CQE_OPTYPE(&cqe) == OP_SEND) ||
   444  (DAPL_GET_CQE_OPTYPE(&cqe) == OP_RDMA_WRITE)) {
   449  "\t\t bytes_num %d\n", DAPL_GET_CQE_BYTESNUM(&cqe));
   452  "\t\t status %d\n", DAPL_GET_CQE_STATUS(&cqe));
   416  dapli_evd_eh_print_cqe( IN ib_work_completion_t cqe) argument
   844  ib_work_completion_t *cqe; local
  1220  ib_work_completion_t cqe[MAX_CQES_PER_POLL]; local
   [all...]
/illumos-gate/usr/src/uts/common/io/ib/mgt/ibmf/
ibmf_handlers.c
   313  ibt_wc_t cqe; local
   330  status = ibt_poll_cq(cq_handle, &cqe, 1, NULL);
   348  ibmf_i_process_completion(ibmf_cip, &cqe);
   358  status = ibt_poll_cq(cq_handle, &cqe, 1, NULL);
   376  ibmf_i_process_completion(ibmf_cip, &cqe);
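Note: the ibmf handler polls one completion at a time: each ibt_poll_cq() call fills a single ibt_wc_t and the loop runs until the CQ reports empty. A minimal sketch of that shape, assuming a valid CQ handle and omitting the re-arm step the real handler performs:

    #include <sys/ib/ibtl/ibti.h>      /* IBTF consumer interface (kernel) */

    static void
    drain_cq(ibt_cq_hdl_t cq_handle)
    {
        ibt_wc_t cqe;
        ibt_status_t status;

        for (;;) {
            status = ibt_poll_cq(cq_handle, &cqe, 1, NULL);
            if (status != IBT_SUCCESS)     /* IBT_CQ_EMPTY once drained */
                break;
            /* ... process the work completion (cqe.wc_id, cqe.wc_status) ... */
        }
    }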
/illumos-gate/usr/src/uts/common/io/nvme/
nvme.c
   792  nvme_cqe_t *cqe; local
   798  cqe = &qp->nq_cq[qp->nq_cqhead];
   801  if (cqe->cqe_sf.sf_p == qp->nq_phase)
   804  ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
   805  ASSERT(cqe->cqe_cid < qp->nq_nentry);
   808  cmd = qp->nq_cmd[cqe->cqe_cid];
   809  qp->nq_cmd[cqe->cqe_cid] = NULL;
   815  ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
   816  ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid);
   817  bcopy(cqe,
   835  nvme_cqe_t *cqe = &cmd->nc_cqe; local
   856  nvme_cqe_t *cqe = &cmd->nc_cqe; local
   874  nvme_cqe_t *cqe = &cmd->nc_cqe; local
   897  nvme_cqe_t *cqe = &cmd->nc_cqe; local
   997  nvme_cqe_t *cqe = &cmd->nc_cqe; local
  1096  nvme_cqe_t *cqe = &cmd->nc_cqe; local
   [all...]
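Note: the nvme.c hits trace the NVMe phase-tag protocol: the controller inverts the phase bit it writes on each pass through the completion queue, so an entry whose sf_p still equals the queue's recorded phase has not been written yet, and when the head wraps, the expected phase flips. A self-contained sketch of that check, with simplified types (the real CQE also carries sqid, cid, and status, as the hits show):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct cqe {
        uint16_t cid;                  /* command identifier */
        uint16_t status_p;             /* bit 0: phase tag, rest: status */
    } cqe_t;

    typedef struct cq {
        cqe_t   *entries;
        uint16_t head;
        uint16_t nentry;
        uint16_t phase;                /* phase of already-consumed entries, 0 at start */
    } cq_t;

    static cqe_t *
    cq_next_completion(cq_t *q)
    {
        cqe_t *cqe = &q->entries[q->head];

        if ((cqe->status_p & 1) == q->phase)
            return (NULL);             /* device has not written this slot yet */

        if (++q->head == q->nentry) {  /* wrap: the expected phase flips */
            q->head = 0;
            q->phase ^= 1;
        }
        return (cqe);                  /* caller writes the new head to the doorbell */
    }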
/illumos-gate/usr/src/uts/common/sys/ib/adapters/tavor/
tavor_hw.h
  1441  #define TAVOR_CQE_QPNUM_GET(cq, cqe) \
  1443  &((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
  1445  #define TAVOR_CQE_DQPN_GET(cq, cqe) \
  1447  &((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
  1449  #define TAVOR_CQE_SL_GET(cq, cqe) \
  1451  &((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
  1453  #define TAVOR_CQE_GRH_GET(cq, cqe) \
  1455  &((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
  1457  #define TAVOR_CQE_PATHBITS_GET(cq, cqe) \
  1459  &((uint32_t *)(cqe))[
   [all...]
tavor_wr.h
   303  uint64_t tavor_wrid_get_entry(tavor_cqhdl_t cqhdl, tavor_hw_cqe_t *cqe,
   314  tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe);
/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/
emlxs_sli4.c
   140  CQE_ASYNC_t *cqe);
   142  CQE_ASYNC_t *cqe);
  4962  emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe) argument
  4968  if (hba->link_event_tag == cqe->un.link.event_tag) {
  4970  } else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
  4973  hba->link_event_tag = cqe->un.link.event_tag;
  4975  switch (cqe->event_code) {
  4979  switch (cqe->un.link.link_status) {
  4984  cqe->valid, cqe
  5188  emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe) argument
  5407  emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp) argument
  5519  CQE_CmplWQ_t cqe; local
  5602  emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe) argument
  5626  emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe) argument
  5753  emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_RelWQ_t *cqe) argument
  5917  emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_UnsolRcv_t *cqe) argument
  6741  emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_XRI_Abort_t *cqe) argument
  6786  CQE_u *cqe; local
  9029  emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe) argument
  9080  emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe) argument
   [all...]
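Note: emlxs_sli4_process_async_event() sanity-checks the link event tag before acting: a tag equal to the last one seen is a duplicate, a jump of more than one means events were lost, and the new tag is recorded either way. That bookkeeping in isolation (printf stands in for the driver's logging):

    #include <stdint.h>
    #include <stdio.h>

    static void
    note_link_event(uint32_t *last_tag, uint32_t event_tag)
    {
        if (*last_tag == event_tag)
            printf("duplicate link event, tag %u\n", event_tag);
        else if (*last_tag + 1 < event_tag)
            printf("missed link event(s), tag %u -> %u\n",
                *last_tag, event_tag);
        *last_tag = event_tag;         /* per-HBA state in the real driver */
    }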
/illumos-gate/usr/src/uts/common/io/ib/clients/of/sol_uverbs/
sol_uverbs_comp.c
   208  cq_attr.cq_size = cmd.cqe;
   220  if (!cmd.cqe) {
   221  SOL_OFS_DPRINTF_L2(sol_uverbs_dbg_str, "create_cq: 0 cqe");
   328  resp.cqe = real_size;
   534  resize_status = ibt_resize_cq(ucq->cq, cmd.cqe, &resp.cqe);
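Note: the uverbs path shows the CQE-count contract end to end: userland asks for cmd.cqe entries, a request for zero is rejected, and the response carries back the real (possibly rounded-up) size the HCA allocated in resp.cqe; resize follows the same shape through ibt_resize_cq(). A thin sketch of the kernel side; the zero check mirrors the hit at line 220, and the specific error code is an assumption:

    #include <sys/ib/ibtl/ibti.h>

    static ibt_status_t
    uverbs_resize_cq(ibt_cq_hdl_t cq, uint_t requested, uint_t *real_size)
    {
        if (requested == 0)
            return (IBT_INVALID_PARAM);    /* assumed code; uverbs logs and fails */

        /* *real_size may come back larger than requested; report it as-is */
        return (ibt_resize_cq(cq, requested, real_size));
    }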
/illumos-gate/usr/src/uts/common/sys/ib/clients/of/rdma/
ib_user_verbs.h
   310  uint32_t cqe;   member in struct:ib_uverbs_create_cq
   326  uint32_t cqe;   member in struct:ib_uverbs_create_cq_resp
   333  uint32_t cqe;   member in struct:ib_uverbs_resize_cq
   338  uint32_t cqe;   member in struct:ib_uverbs_resize_cq_resp