/drivers/infiniband/hw/cxgb3/
iwch_cq.c
      40:  * 1 cqe returned
      48: struct t3_cqe cqe, *rd_cqe;  [local]
      67: ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
      83: wc->vendor_err = CQE_STATUS(cqe);
      88: CQE_QPID(cqe), CQE_TYPE(cqe),
      89: CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
      90: CQE_WRID_LOW(cqe), (unsigne
    [all...]
iwch_ev.c
      52: qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
      56: __func__, CQE_STATUS(rsp_msg->cqe),
      57: CQE_QPID(rsp_msg->cqe));
      66: qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
      73: CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
      74: CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
      75: CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
    [all...]
cxio_hal.c
      75: struct t3_cqe *cqe;  [local]
     105:  * Now rptr is the index for the (last) cqe that was
     109: cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
     110: while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
     352: struct t3_cqe cqe;  [local]
     356: memset(&cqe, 0, sizeof(cqe));
     357: cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
     364: *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
     389: struct t3_cqe cqe;  [local]
     430: struct t3_cqe *cqe, *swcqe;  [local]
     446: cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)  [argument]
     466: struct t3_cqe *cqe;  [local]
     484: struct t3_cqe *cqe;  [local]
    1147: cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit)  [argument]
    [all...]
cxio_wr.h
     675: struct t3_cqe cqe;  [member in struct t3_swsq]
     728: #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
     729:         CQE_GENBIT(*cqe))
     769: struct t3_cqe *cqe;  [local]
     771: cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
     772: if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
     773: return cqe;
     779: struct t3_cqe *cqe;  [local]
     782: cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
     783: return cqe;
     790: struct t3_cqe *cqe;  [local]
    [all...]
cxio_hal.h
     146: struct t3_cqe cqe;  /* flits 2-3 */  [member in struct respQ_msg_t]
     195: int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
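The recurring pattern in these cxgb3 hits is the generation-bit test: CQ_VLD_ENTRY (cxio_wr.h line 728) declares a CQE valid only when the gen bit the hardware wrote into the entry matches the gen bit implied by the consumer pointer, so entries left over from the previous lap around the ring read as stale. A minimal userspace sketch of that test, using an illustrative 16-entry ring rather than the driver's actual layout:

    #include <stdint.h>
    #include <stdio.h>

    #define SIZE_LOG2 4                                   /* 16-entry ring */
    #define Q_PTR2IDX(ptr) ((ptr) & ((1u << SIZE_LOG2) - 1))
    #define Q_GENBIT(ptr)  (((ptr) >> SIZE_LOG2) & 1u)    /* flips each wrap */

    struct cqe {
        uint32_t data;
        uint8_t  genbit;      /* written by "hardware" with each entry */
    };

    /* CQ_VLD_ENTRY, simplified: entry gen bit == consumer pointer gen bit. */
    static int cq_vld_entry(uint32_t rptr, const struct cqe *queue)
    {
        return queue[Q_PTR2IDX(rptr)].genbit == Q_GENBIT(rptr);
    }

    int main(void)
    {
        struct cqe queue[1 << SIZE_LOG2] = { 0 };
        uint32_t rptr = 1u << SIZE_LOG2;   /* consumer is on its second lap */

        queue[0].genbit = 0;               /* stale entry from the first lap */
        printf("stale: valid=%d\n", cq_vld_entry(rptr, queue));
        queue[0].genbit = 1;               /* hardware wrote a fresh entry */
        printf("fresh: valid=%d\n", cq_vld_entry(rptr, queue));
        return 0;
    }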
/drivers/infiniband/hw/mlx4/
cq.c
      80: struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  [local]
      81: struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
      84: !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
     132: static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)  [argument]
     134: mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
     137: mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe)  [argument]
     139: u64 buf_addr, int cqe)
     144: *umem = ib_umem_get(context, buf_addr, cqe * cqe_siz
     335: struct mlx4_cqe *cqe, *new_cqe;  [local]
     481: dump_cqe(void *cqe)  [argument]
     491: mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc)  [argument]
     566: use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth)  [argument]
     598: struct mlx4_cqe *cqe;  [local]
     870: struct mlx4_cqe *cqe, *dest;  [local]
    [all...]
/drivers/infiniband/hw/mthca/
mthca_cq.c
     174: static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  [argument]
     176: return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
     181: return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
     184: static inline void set_cqe_hw(struct mthca_cqe *cqe)  [argument]
     186: cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
     191: __be32 *cqe = cqe_ptr;  [local]
     193: (void) cqe; /* avoid warning if mthca_dbg compiled away... */
     195: be32_to_cpu(cqe[0]), be32_to_cpu(cqe[
     269: is_recv_cqe(struct mthca_cqe *cqe)  [argument]
     281: struct mthca_cqe *cqe;  [local]
     316: cqe, MTHCA_CQ_ENTRY_SIZE);  [local]
     371: mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)  [argument]
     377: handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe)  [argument]
     490: struct mthca_cqe *cqe;  [local]
    [all...]
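mthca uses an explicit owner bit rather than a lap-parity generation bit: software may consume an entry only while the hardware-owner bit is clear, and returns the slot to hardware by setting the bit again before advancing the consumer index. A hedged sketch of that handshake (the bit position and struct layout here are illustrative, not mthca's):

    #include <stdint.h>
    #include <stdio.h>

    #define CQ_ENTRY_OWNER_HW 0x80          /* illustrative bit position */

    struct cqe {
        uint8_t owner;
        uint8_t payload[31];
    };

    /* Return the entry if software owns it, NULL while hardware still does
     * (the cqe_sw() test above, simplified). */
    static struct cqe *cqe_sw(struct cqe *cqe)
    {
        return (cqe->owner & CQ_ENTRY_OWNER_HW) ? NULL : cqe;
    }

    /* Hand the slot back to hardware once it has been polled (set_cqe_hw()). */
    static void set_cqe_hw(struct cqe *cqe)
    {
        cqe->owner = CQ_ENTRY_OWNER_HW;
    }

    int main(void)
    {
        struct cqe e = { .owner = 0 };      /* hardware completed: bit clear */
        printf("software-owned: %s\n", cqe_sw(&e) ? "yes" : "no");
        set_cqe_hw(&e);                     /* recycle the slot */
        printf("software-owned: %s\n", cqe_sw(&e) ? "yes" : "no");
        return 0;
    }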
/drivers/infiniband/hw/cxgb4/
cq.c
     180: struct t4_cqe cqe;  [local]
     184: memset(&cqe, 0, sizeof(cqe));
     185: cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
     190: cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
     191: cq->sw_queue[cq->sw_pidx] = cqe;
     213: struct t4_cqe cqe;  [local]
     217: memset(&cqe, 0, sizeof(cqe));
     218: cqe
     412: cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)  [argument]
     430: struct t4_cqe *cqe;  [local]
     463: poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit)  [argument]
     990: c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)  [argument]
    [all...]
t4.h
     185: /* macros for flit 0 of the cqe */
     239: /* macros for flit 3 of the cqe */
     262: struct t4_cqe cqe;  [member in struct t4_swsqe]
     614: static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)  [argument]
     616: return (CQE_GENBIT(cqe) == cq->gen);
     619: static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)  [argument]
     638: *cqe = &cq->queue[cq->cidx];
     658: static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)  [argument]
     665: *cqe = &cq->sw_queue[cq->sw_cidx];
     667: ret = t4_next_hw_cqe(cq, cqe);
    [all...]
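The cq.c hits above show cxgb4 synthesizing T4_ERR_SWFLUSH completions into a software queue, and t4_next_cqe drains that software queue before touching the hardware ring. A minimal sketch of that ordering (the names echo t4.h, but the layout and the hw_valid stand-in flag are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct cqe { unsigned wrid; };

    struct cq {
        struct cqe *queue;      /* hardware ring */
        struct cqe *sw_queue;   /* software completions (flushed WRs) */
        unsigned cidx, sw_cidx;
        unsigned sw_in_use;     /* entries pending in sw_queue */
        bool hw_valid;          /* stand-in for the gen-bit test on queue[cidx] */
    };

    /* Drain software completions before the hardware ring, mirroring the
     * order of checks in t4_next_cqe(). */
    static struct cqe *next_cqe(struct cq *cq)
    {
        if (cq->sw_in_use)
            return &cq->sw_queue[cq->sw_cidx];
        if (cq->hw_valid)
            return &cq->queue[cq->cidx];
        return NULL;
    }

    int main(void)
    {
        struct cqe hw[4] = { {1}, {2}, {3}, {4} };
        struct cqe sw[4] = { {100}, {0}, {0}, {0} };
        struct cq cq = { hw, sw, 0, 0, 1, true };

        printf("wrid %u\n", next_cqe(&cq)->wrid);  /* 100: sw entry wins */
        cq.sw_in_use = 0;
        printf("wrid %u\n", next_cqe(&cq)->wrid);  /* 1: hw entry next */
        return 0;
    }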
/drivers/infiniband/hw/mlx5/
cq.c
      84: void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  [local]
      87: cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
      90: !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
      91: return cqe;
     120: static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  [argument]
     124: switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
     138: wc->byte_len = be32_to_cpu(cqe->byte_cnt);
     170: static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  [argument]
     233: dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)  [argument]
     245: mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc)  [argument]
     370: get_sig_err_item(struct mlx5_sig_err_cqe *cqe, struct ib_sig_err *item)  [argument]
     417: void *cqe;  [local]
     677: void *cqe;  [local]
     856: void *cqe, *dest;  [local]
    [all...]
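The hit at line 90 is mlx5's ownership test: the owner bit in op_own must equal the wrap ("lap") bit of the consumer index, the same one-bit generation idea as cxgb3/cxgb4. (Line 87 additionally skips 64 bytes when 128-byte CQEs are in use, since the mlx5_cqe64 sits in the second half of the stride.) A sketch of just the parity test, with an illustrative mask value:

    #include <stdint.h>
    #include <stdio.h>

    #define CQE_OWNER_MASK 1u          /* low bit of op_own, as in mlx5 */

    /* A CQE at consumer index n is software-owned when its owner bit equals
     * the lap parity of n: the bit of n just above the index bits flips on
     * every wrap, acting as a one-bit generation counter.  cqsize must be a
     * power of two. */
    static int sw_owned(uint8_t op_own, uint32_t n, uint32_t cqsize)
    {
        uint32_t lap = !!(n & cqsize);
        return ((op_own & CQE_OWNER_MASK) ^ lap) == 0;
    }

    int main(void)
    {
        /* 8-entry CQ: an entry written on the first lap carries owner bit 0 */
        printf("first lap:  %d\n", sw_owned(0x00, 3, 8));      /* 1 */
        /* the same bits are stale once the consumer is on the second lap */
        printf("second lap: %d\n", sw_owned(0x00, 8 + 3, 8));  /* 0 */
        return 0;
    }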
/drivers/infiniband/hw/ipath/
ipath_cq.c
      63: if (head >= (unsigned) cq->ibcq.cqe) {
      64: head = cq->ibcq.cqe;
     147: if (tail > (u32) cq->ibcq.cqe)
     148: tail = (u32) cq->ibcq.cqe;
     154: if (tail >= cq->ibcq.cqe)
     279:  * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
     283: cq->ibcq.cqe = entries;
     371: int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  [argument]
     380: if (cqe < 1 || cqe > ib_ipath_max_cqe
    [all...]
/drivers/infiniband/hw/qib/
qib_cq.c
      66: if (head >= (unsigned) cq->ibcq.cqe) {
      67: head = cq->ibcq.cqe;
     154: if (tail > (u32) cq->ibcq.cqe)
     155: tail = (u32) cq->ibcq.cqe;
     161: if (tail >= cq->ibcq.cqe)
     294:  * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
     299: cq->ibcq.cqe = entries;
     387: int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  [argument]
     396: if (cqe < 1 || cqe > ib_qib_max_cqe
    [all...]
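ipath and qib have no hardware CQ at all: completions live in an ordinary ring of ibcq.cqe + 1 slots with head/tail indices (the spare slot distinguishes full from empty), which is why the code above clamps head and tail before using them — the ring can be mapped into userspace, so the indices are untrusted. A reduced sketch of that ring discipline (locking and the user-mmap path omitted):

    #include <stdio.h>

    #define CQE 4                      /* ibcq.cqe: usable entries */

    struct wc { int id; };

    struct sw_cq {
        struct wc queue[CQE + 1];      /* one spare slot: full vs. empty */
        unsigned head;                 /* next slot to write */
        unsigned tail;                 /* next slot to read  */
    };

    static int cq_enter(struct sw_cq *cq, struct wc e)
    {
        unsigned next = (cq->head == CQE) ? 0 : cq->head + 1;
        if (next == cq->tail)
            return -1;                 /* would overflow: driver raises a CQ error */
        cq->queue[cq->head] = e;
        cq->head = next;
        return 0;
    }

    static int cq_poll(struct sw_cq *cq, struct wc *e)
    {
        if (cq->tail == cq->head)
            return 0;                  /* empty */
        *e = cq->queue[cq->tail];
        cq->tail = (cq->tail == CQE) ? 0 : cq->tail + 1;
        return 1;
    }

    int main(void)
    {
        struct sw_cq cq = { .head = 0, .tail = 0 };
        struct wc e;
        for (int i = 0; i < 5; i++)            /* the 5th entry is refused */
            if (cq_enter(&cq, (struct wc){ i }) < 0)
                printf("ring full at entry %d\n", i);
        while (cq_poll(&cq, &e))
            printf("completed %d\n", e.id);
        return 0;
    }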
/drivers/infiniband/hw/ehca/
ehca_reqs.c
     629: struct ehca_cqe *cqe;  [local]
     636: cqe = (struct ehca_cqe *)
     638: if (!cqe) {
     650: if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
     655: qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
     658: "could not find qp -> ignore cqe",
     659: my_cq->cq_number, cqe->local_qp_number);
     660: ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
     661: my_cq->cq_number, cqe->local_qp_number);
     662: /* ignore this purged cqe */
    [all...]
ehca_cq.c
     116: struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,  [argument]
     134: if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
     183: param.nr_cqe = cqe + additional_cqe;
     261: my_cq->ib_cq.cqe = my_cq->nr_of_entries =
     369: int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)  [argument]
ipz_pt_fn.h
     144: struct ehca_cqe *cqe = ipz_qeit_get(queue);  [local]
     145: return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
ehca_iverbs.h
     129: struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
     135: int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
/drivers/infiniband/hw/ocrdma/
ocrdma.h
     445: static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)  [argument]
     448: cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
     452: static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)  [argument]
     454: return (le32_to_cpu(cqe->flags_status_srcqpn) &
     458: static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)  [argument]
     460: return (le32_to_cpu(cqe->flags_status_srcqpn) &
     464: static inline int is_cqe_imm(struct ocrdma_cqe *cqe)  [argument]
     466: return (le32_to_cpu(cqe->flags_status_srcqpn) &
     470: static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)  [argument]
     472: return (le32_to_cpu(cqe
    [all...]
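ocrdma packs validity, queue type, and immediate-data flags into one little-endian word, flags_status_srcqpn; each predicate above byte-swaps once and masks, and the valid bit is compared against the CQ's running phase so that entries from the previous lap read as invalid without being cleared. A sketch of two of the predicates with illustrative mask values (the real ones live in ocrdma_sli.h):

    #include <stdint.h>
    #include <stdio.h>

    #define CQE_VALID    (1u << 31)    /* illustrative mask values; the real */
    #define CQE_QTYPE_SQ (1u << 30)    /* ones live in ocrdma_sli.h */

    struct cqe { uint32_t flags_status_srcqpn; };

    static uint32_t le32_to_cpu_u(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);   /* byte swap on big-endian hosts */
    #else
        return v;                      /* identity on little-endian hosts */
    #endif
    }

    /* Valid bit compared against the CQ's running phase (either CQE_VALID
     * or 0), so last-lap entries fail the test automatically. */
    static int is_cqe_valid(const struct cqe *cqe, uint32_t phase)
    {
        return (le32_to_cpu_u(cqe->flags_status_srcqpn) & CQE_VALID) == phase;
    }

    static int is_cqe_for_sq(const struct cqe *cqe)
    {
        return !!(le32_to_cpu_u(cqe->flags_status_srcqpn) & CQE_QTYPE_SQ);
    }

    int main(void)
    {
        struct cqe e = { CQE_VALID | CQE_QTYPE_SQ };  /* host order for the demo */
        printf("valid=%d for_sq=%d\n",
               is_cqe_valid(&e, CQE_VALID), is_cqe_for_sq(&e));
        return 0;
    }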
ocrdma_verbs.c
     957: ibcq->cqe = new_cnt;
     968: struct ocrdma_cqe *cqe = NULL;  [local]
     970: cqe = cq->va;
     978: if (is_cqe_valid(cq, cqe))
     980: cqe++;
    1342: /* syncronize with wqe, rqe posting and cqe processing contexts */
    1527: /* discard the cqe for a given QP */
    1534: struct ocrdma_cqe *cqe;  [local]
    1543:  * we don't complete out of order cqe.
    1553: cqe
    2406: ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe)  [argument]
    2438: ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status)  [argument]
    2460: ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status)  [argument]
    2470: ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status)  [argument]
    2480: ocrdma_poll_err_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop)  [argument]
    2521: ocrdma_poll_success_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled)  [argument]
    2547: ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop)  [argument]
    2563: ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)  [argument]
    2579: ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp)  [argument]
    2600: ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop, int status)  [argument]
    2631: ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc)  [argument]
    2662: ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop)  [argument]
    2688: ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, u16 cur_getp)  [argument]
    2709: struct ocrdma_cqe *cqe;  [local]
    [all...]
ocrdma_verbs.h
      61: int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
/drivers/scsi/bnx2i/
bnx2i.h
     383:  * @num_cqe_rcvd: statistic counter, total cqe's received
     506: struct cqe {  [struct]
     650: struct cqe *cq_virt;
     654: struct cqe *cq_prod_qe;
     655: struct cqe *cq_cons_qe;
     656: struct cqe *cq_first_qe;
     657: struct cqe *cq_last_qe;
     774: struct cqe cqe;  [member in struct bnx2i_work]
     881: struct cqe *cq
    [all...]
bnx2i_hwi.c
    1349:  * @cqe: pointer to newly DMA'ed CQE entry for processing
    1353: bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1355: struct cqe *cqe)
    1365: resp_cqe = (struct bnx2i_cmd_response *)cqe;
    1400: resp_cqe = (struct bnx2i_cmd_response *)cqe;
    1446:  * @cqe: pointer to newly DMA'ed CQE entry for processing
    1450: bnx2i_process_login_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1452: struct cqe *cqe)
    1461: login = (struct bnx2i_login_response *) cqe;
    1514:  * @cqe
    1518: bnx2i_process_text_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1579: bnx2i_process_tmf_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1618: bnx2i_process_logout_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1664: bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1705: bnx2i_process_nopin_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1757: bnx2i_process_async_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1807: bnx2i_process_reject_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1844: bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe)  [argument]
    1915: bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe)  [argument]
    [all...]
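bnx2i moves every completion around as an opaque struct cqe and only casts it to the opcode-specific layout (bnx2i_cmd_response, bnx2i_login_response, ...) at dispatch time, as the casts at lines 1365 and 1461 show. A sketch of that pattern; the opcode values are the standard iSCSI response codes, but the struct layouts here are placeholders:

    #include <stdint.h>
    #include <stdio.h>

    /* Each completion arrives as an opaque blob; the first byte carries the
     * iSCSI opcode and selects the concrete layout. */
    struct cqe { uint8_t data[64]; };

    struct cmd_response   { uint8_t op_code; uint8_t status; /* ... */ };
    struct login_response { uint8_t op_code; uint8_t flags;  /* ... */ };

    #define OP_SCSI_CMD_RSP 0x21    /* iSCSI SCSI Response */
    #define OP_LOGIN_RSP    0x23    /* iSCSI Login Response */

    static void process_cqe(struct cqe *cqe)
    {
        switch (cqe->data[0]) {
        case OP_SCSI_CMD_RSP: {
            struct cmd_response *rsp = (struct cmd_response *)cqe;
            printf("scsi rsp, status %u\n", rsp->status);
            break;
        }
        case OP_LOGIN_RSP: {
            struct login_response *rsp = (struct login_response *)cqe;
            printf("login rsp, flags %#x\n", rsp->flags);
            break;
        }
        default:
            printf("unhandled opcode %#x\n", cqe->data[0]);
        }
    }

    int main(void)
    {
        struct cqe e = { { OP_SCSI_CMD_RSP, 0 } };
        process_cqe(&e);
        return 0;
    }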
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
     650: struct mlx4_cqe *cqe;  [local]
     672:  * reading 'cqe->index' */
     674: cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
     677: while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
     689: if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
     692: ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
     693: ((struct mlx4_err_cqe *)cqe)->syndrome);
     696: if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
     739: length = be32_to_cpu(cqe->byte_cnt);
     744: (cqe
    [all...]
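The RX loop at line 677 keeps consuming while the CQE's owner bit agrees (XNOR) with the wrap bit of the consumer index, which is the mlx4 Ethernet spelling of the lap-parity test seen in the IB drivers above. Reduced to its core (the mask value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_MASK 0x80            /* illustrative owner bit in owner_sr_opcode */
    #define XNOR(a, b) (!(a) == !(b))  /* true when both sides agree */

    struct cqe { uint8_t owner_sr_opcode; };

    /* Consume while the owner bit agrees with the wrap bit of the consumer
     * index -- the same one-bit lap parity as the gen-bit drivers above. */
    static int cqe_ready(const struct cqe *cqe, uint32_t cons_index, uint32_t size)
    {
        return XNOR(cqe->owner_sr_opcode & OWNER_MASK, cons_index & size);
    }

    int main(void)
    {
        struct cqe e = { OWNER_MASK };           /* owner bit set */
        printf("%d\n", cqe_ready(&e, 8, 8));     /* 1: parity matches */
        printf("%d\n", cqe_ready(&e, 0, 8));     /* 0: parity mismatch */
        return 0;
    }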
en_clock.c
      95: u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)  [argument]
      98: struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
/drivers/net/ethernet/ibm/ehea/
ehea_main.c
     543: static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)  [argument]
     545: *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
     546: if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
     548: if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
     549: (cqe->header_length == 0)
     554: ehea_fill_skb(struct net_device *dev, struct sk_buff *skb, struct ehea_cqe *cqe, struct ehea_port_res *pr)  [argument]
     555: struct sk_buff *skb, struct ehea_cqe *cqe,
     558: int length = cqe->num_bytes_transfered - 4; /*remove CRC */
     565: if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
     567: skb->csum = csum_unfold(~cqe->inet_checksum_value);
     574: get_skb_by_index(struct sk_buff **skb_array, int arr_len, struct ehea_cqe *cqe)  [argument]
     576: struct ehea_cqe *cqe)
     628: ehea_treat_poll_error(struct ehea_port_res *pr, int rq, struct ehea_cqe *cqe, int *processed_rq2, int *processed_rq3)  [argument]
     670: struct ehea_cqe *cqe;  [local]
     817: struct ehea_cqe *cqe;  [local]
     894: struct ehea_cqe *cqe;  [local]
    [all...]
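ehea_check_cqe (lines 543-549) folds two decisions into one helper: it extracts the receive-queue number from the type field and reports whether the status carries a real error, with one quirk visible above — a TCP-checksum error flagged on a frame with zero header length is treated as clean. A sketch with illustrative mask values:

    #include <stdint.h>
    #include <stdio.h>

    #define CQE_TYPE_RQ       0x60     /* illustrative masks */
    #define CQE_STAT_ERR_MASK 0x0F
    #define CQE_STAT_ERR_TCP  0x02

    struct cqe {
        uint8_t  type;
        uint8_t  status;
        uint16_t header_length;
    };

    /* Returns 0 for a usable frame, -1 for a real receive error, and writes
     * the receive-queue number extracted from the type field. */
    static int check_cqe(const struct cqe *cqe, int *rq_num)
    {
        *rq_num = (cqe->type & CQE_TYPE_RQ) >> 5;
        if ((cqe->status & CQE_STAT_ERR_MASK) == 0)
            return 0;                           /* clean completion */
        if ((cqe->status & CQE_STAT_ERR_TCP) && cqe->header_length == 0)
            return 0;                           /* bogus TCP-csum flag: accept */
        return -1;
    }

    int main(void)
    {
        struct cqe tcp = { .type = 1 << 5, .status = CQE_STAT_ERR_TCP,
                           .header_length = 0 };
        int rq;
        int rc = check_cqe(&tcp, &rq);
        printf("rc=%d rq=%d\n", rc, rq);        /* rc=0 rq=1: quirk accepts it */
        return 0;
    }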
/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.c
     352: bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, u16 sge_len, struct eth_end_agg_rx_cqe *cqe)  [argument]
     354: struct eth_end_agg_rx_cqe *cqe)
     367: RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
     370: sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
     375: le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
     408: bnx2x_get_rxhash(const struct bnx2x *bp, const struct eth_fast_path_rx_cqe *cqe, enum pkt_hash_types *rxhash_type)  [argument]
     409: const struct eth_fast_path_rx_cqe *cqe,
     414: (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
     417: htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
     422: return le32_to_cpu(cqe->rss_hash_result);
     428: bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, u16 cons, u16 prod, struct eth_fast_path_rx_cqe *cqe)  [argument]
     430: struct eth_fast_path_rx_cqe *cqe)
     473: le16_to_cpu(cqe
     574: bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, u16 pages, struct sk_buff *skb, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx)  [argument]
     739: bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, u16 pages, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx)  [argument]
     843: bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, struct bnx2x_fastpath *fp, struct bnx2x_eth_q_stats *qstats)  [argument]
     872: union eth_rx_cqe *cqe;  [local]
    [all...]
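bnx2x_get_rxhash (lines 408-422) only trusts the RSS hash in the fast-path CQE when the hash-valid flag is set, and uses a second flag to report whether it is an L3 or L4 hash; otherwise it reports "no hash" and the stack computes its own. A sketch of that gating (flag bits illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RSS_HASH_FLG  (1u << 0)    /* illustrative flag bits */
    #define RSS_HASH_TYPE (1u << 1)    /* set: L4 hash; clear: L3 hash */

    struct fp_cqe {
        uint8_t  status_flags;
        uint32_t rss_hash_result;      /* le32 on the wire; host order here */
    };

    enum hash_type { HASH_NONE, HASH_L3, HASH_L4 };

    /* Report the hardware RSS hash only when the CQE says it is valid. */
    static uint32_t get_rxhash(const struct fp_cqe *cqe, enum hash_type *type)
    {
        if (!(cqe->status_flags & RSS_HASH_FLG)) {
            *type = HASH_NONE;
            return 0;                  /* stack computes its own hash */
        }
        *type = (cqe->status_flags & RSS_HASH_TYPE) ? HASH_L4 : HASH_L3;
        return cqe->rss_hash_result;   /* le32_to_cpu() in the driver */
    }

    int main(void)
    {
        struct fp_cqe e = { RSS_HASH_FLG | RSS_HASH_TYPE, 0xdeadbeef };
        enum hash_type t;
        uint32_t hash = get_rxhash(&e, &t);
        printf("hash=%#x type=%d\n", (unsigned)hash, (int)t);
        return 0;
    }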