Searched refs:cqe (Results 1 - 25 of 50) sorted by relevance


/drivers/infiniband/hw/cxgb3/
iwch_cq.c  40 * 1 cqe returned
48 struct t3_cqe cqe, *rd_cqe; local
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
83 wc->vendor_err = CQE_STATUS(cqe);
88 CQE_QPID(cqe), CQE_TYPE(cqe),
89 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
90 CQE_WRID_LOW(cqe), (unsigne
[all...]
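The CQE_QPID/CQE_OPCODE/CQE_STATUS/CQE_WRID_* accessors in the hits above are mask-and-shift extractors over the packed CQE header. A minimal standalone sketch of that accessor style follows, assuming invented field positions and widths (the real t3_cqe layout differs, and the header is big-endian on the wire):

    #include <stdint.h>

    /* Assumed layout: status in bits 0-4, opcode in bits 8-11, QP id in
     * bits 12-31.  Purely illustrative, not the cxgb3 layout. */
    #define SK_CQE_STATUS_SHIFT  0
    #define SK_CQE_STATUS_MASK   0x1F
    #define SK_CQE_OPCODE_SHIFT  8
    #define SK_CQE_OPCODE_MASK   0xF
    #define SK_CQE_QPID_SHIFT    12
    #define SK_CQE_QPID_MASK     0xFFFFF

    #define SK_CQE_GET(hdr, name) \
            (((hdr) >> SK_CQE_##name##_SHIFT) & SK_CQE_##name##_MASK)

    struct sk_cqe {
            uint32_t header;   /* packed status/opcode/qpid fields */
            uint64_t wrid;     /* work request id reported back in the ib_wc */
    };

    static inline uint32_t sk_cqe_qpid(const struct sk_cqe *cqe)
    {
            return SK_CQE_GET(cqe->header, QPID);
    }

    static inline uint32_t sk_cqe_status(const struct sk_cqe *cqe)
    {
            return SK_CQE_GET(cqe->header, STATUS);
    }

Packing several small fields into one 32-bit word keeps the CQE compact and lets the hardware fill it with a small number of stores.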
iwch_ev.c  52 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
56 __func__, CQE_STATUS(rsp_msg->cqe),
57 CQE_QPID(rsp_msg->cqe));
66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
73 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
74 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
75 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
[all...]
cxio_hal.c  75 struct t3_cqe *cqe; local
105 * Now rptr is the index for the (last) cqe that was
109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
352 struct t3_cqe cqe; local
356 memset(&cqe, 0, sizeof(cqe));
357 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
364 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
389 struct t3_cqe cqe; local
430 struct t3_cqe *cqe, *swcqe; local
446 cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) argument
466 struct t3_cqe *cqe; local
484 struct t3_cqe *cqe; local
1149 cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
[all...]
cxio_wr.h  675 struct t3_cqe cqe; member in struct:t3_swsq
728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
729 CQE_GENBIT(*cqe))
769 struct t3_cqe *cqe; local
771 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
772 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
773 return cqe;
779 struct t3_cqe *cqe; local
782 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
783 return cqe;
790 struct t3_cqe *cqe; local
[all...]
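CQ_VLD_ENTRY above compares the generation bit stored in the CQE against the generation implied by the free-running read pointer. A compilable sketch of that scheme, with invented names and the simplifying assumption that the generation starts at 0 and flips on every wrap:

    #include <stdint.h>

    struct gen_cqe {
            uint32_t payload;
            uint8_t  genbit;        /* written by the producer with each entry */
    };

    struct gen_cq {
            struct gen_cqe *queue;  /* 1 << size_log2 entries */
            uint32_t rptr;          /* free-running consumer pointer */
            uint32_t size_log2;
    };

    /* ring index: the low size_log2 bits of the pointer */
    static uint32_t gen_ptr2idx(uint32_t ptr, uint32_t size_log2)
    {
            return ptr & ((1u << size_log2) - 1);
    }

    /* generation expected at this pointer: flips each time the ring wraps */
    static uint8_t gen_ptr_genbit(uint32_t ptr, uint32_t size_log2)
    {
            return (ptr >> size_log2) & 1u;
    }

    /* an entry is new and valid when its genbit matches the expected one */
    static int gen_cq_valid_entry(const struct gen_cq *cq)
    {
            const struct gen_cqe *cqe =
                    &cq->queue[gen_ptr2idx(cq->rptr, cq->size_log2)];

            return cqe->genbit == gen_ptr_genbit(cq->rptr, cq->size_log2);
    }

In this scheme the consumer never needs to write the entry back; the flipping generation bit alone distinguishes a freshly written CQE from a stale one left over from the previous pass.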
cxio_hal.h  146 struct t3_cqe cqe; /* flits 2-3 */ member in struct:respQ_msg_t
195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
iwch_provider.c  191 chp->ibcq.cqe = 1 << chp->cq.size_log2;
244 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
251 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
254 if (cqe <= cq->cqe)
258 cqe = roundup_pow_of_two(cqe+1);
259 newcq.size_log2 = ilog2(cqe);
262 if (cqe < Q_COUN
[all...]
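The iwch_resize_cq() hits above show the usual sizing step: reject a shrink, then round the requested depth up to a power of two and keep its log2. A sketch of just that arithmetic, using local stand-ins for the kernel's roundup_pow_of_two()/ilog2() helpers:

    #include <stdint.h>

    static uint32_t my_roundup_pow_of_two(uint32_t n)
    {
            uint32_t v = 1;

            while (v < n)
                    v <<= 1;
            return v;
    }

    static uint32_t my_ilog2(uint32_t v)
    {
            uint32_t log = 0;

            while (v >>= 1)
                    log++;
            return log;
    }

    /*
     * Size a CQ for 'requested' user-visible entries.  One extra slot is
     * added before rounding, mirroring the "cqe + 1" in the snippet above.
     */
    static void size_cq(uint32_t requested, uint32_t *entries, uint32_t *size_log2)
    {
            *entries = my_roundup_pow_of_two(requested + 1);
            *size_log2 = my_ilog2(*entries);
    }

Keeping the depth a power of two is what lets the index macros elsewhere in these drivers replace a modulo with a mask of the low size_log2 bits.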
/drivers/infiniband/hw/mthca/
mthca_cq.c  174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
191 __be32 *cqe = cqe_ptr; local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[
269 is_recv_cqe(struct mthca_cqe *cqe) argument
281 struct mthca_cqe *cqe; local
316 cqe, MTHCA_CQ_ENTRY_SIZE); local
371 mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) argument
377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
490 struct mthca_cqe *cqe; local
[all...]
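The cqe_sw()/set_cqe_hw() pair above is a plain ownership-bit protocol: an entry belongs to software once the hardware-owner bit is clear, and software sets the bit again when it returns the slot. A self-contained sketch, under the assumption that the owner bit is the top bit of one byte (the real constant is the driver's own):

    #include <stdint.h>
    #include <stddef.h>

    #define OWN_BIT_HW 0x80         /* assumed position of the HW-owner bit */

    struct own_cqe {
            uint8_t owner;
            uint8_t rest[31];       /* remainder of the fixed-size entry */
    };

    struct own_cq {
            struct own_cqe *buf;
            uint32_t cons_index;    /* free-running consumer index */
            uint32_t mask;          /* entries - 1, entries a power of two */
    };

    /* Next entry if software owns it, NULL while hardware still does. */
    static struct own_cqe *next_sw_cqe(struct own_cq *cq)
    {
            struct own_cqe *cqe = &cq->buf[cq->cons_index & cq->mask];

            return (cqe->owner & OWN_BIT_HW) ? NULL : cqe;
    }

    /* After consuming, hand the slot back to hardware and advance. */
    static void put_cqe_back(struct own_cq *cq, struct own_cqe *cqe)
    {
            cqe->owner = OWN_BIT_HW;
            cq->cons_index++;
    }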
mthca_provider.h  193 int cqe; member in struct:mthca_cq_resize
mthca_provider.c  757 cq->resize_buf->cqe = entries - 1;
780 if (entries == ibcq->cqe + 1) {
803 cq->resize_buf->cqe);
820 tcqe = cq->ibcq.cqe;
822 cq->ibcq.cqe = cq->resize_buf->cqe;
825 tcqe = cq->resize_buf->cqe;
834 ibcq->cqe = entries - 1;
/drivers/infiniband/hw/mlx4/
cq.c  79 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
81 return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
82 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
130 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) argument
132 mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
137 u64 buf_addr, int cqe)
141 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
183 cq->ibcq.cqe
135 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) argument
329 struct mlx4_cqe *cqe, *new_cqe; local
462 dump_cqe(void *cqe) argument
472 mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) argument
551 struct mlx4_cqe *cqe; local
776 struct mlx4_cqe *cqe, *dest; local
[all...]
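get_sw_cqe() in the mlx4 hits above works without any write-back from the consumer: the producer flips the owner bit every pass over the ring, and validity is the owner bit XOR'ed against the wrap parity of the consumer index (n & (ibcq.cqe + 1)). A sketch of that test with invented field names:

    #include <stdint.h>
    #include <stddef.h>

    #define PAR_OWNER_BIT 0x80      /* assumed: owner bit shares a byte with the opcode */

    struct par_cqe {
            uint8_t owner_sr_opcode;
    };

    struct par_cq {
            struct par_cqe *buf;
            uint32_t entries;       /* power of two; ibcq.cqe would be entries - 1 */
    };

    /*
     * Valid when the entry's owner bit matches the parity of the pass the
     * consumer index n is on; otherwise the slot still holds the previous
     * pass's, already consumed, completion.
     */
    static struct par_cqe *get_sw_cqe(struct par_cq *cq, uint32_t n)
    {
            struct par_cqe *cqe = &cq->buf[n & (cq->entries - 1)];
            int owner = !!(cqe->owner_sr_opcode & PAR_OWNER_BIT);
            int pass  = !!(n & cq->entries);

            return (owner ^ pass) ? NULL : cqe;
    }

Unlike the mthca pattern earlier in these results, no per-entry write-back such as set_cqe_hw() shows up in these hits.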
/drivers/infiniband/hw/cxgb4/
cq.c  179 struct t4_cqe cqe; local
183 memset(&cqe, 0, sizeof(cqe));
184 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
189 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
190 cq->sw_queue[cq->sw_pidx] = cqe;
212 struct t4_cqe cqe; local
216 memset(&cqe, 0, sizeof(cqe));
217 cqe
251 struct t4_cqe *cqe = NULL, *swcqe; local
268 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
286 struct t4_cqe *cqe; local
305 struct t4_cqe *cqe; local
402 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
568 struct t4_cqe cqe = {0, 0}, *rd_cqe; local
879 c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
[all...]
t4.h  185 /* macros for flit 0 of the cqe */
239 /* macros for flit 3 of the cqe */
262 struct t4_cqe cqe; member in struct:t4_swsqe
521 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) argument
523 return (CQE_GENBIT(cqe) == cq->gen);
526 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) argument
541 *cqe = &cq->queue[cq->cidx];
555 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) argument
562 *cqe = &cq->sw_queue[cq->sw_cidx];
564 ret = t4_next_hw_cqe(cq, cqe);
[all...]
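t4_next_cqe() above prefers the driver-maintained software queue (used for flushed work requests) and only then looks at the hardware ring, where t4_valid_cqe() applies the same generation-bit test as the cxgb3 code earlier. A sketch of that two-source selection with simplified types (the error code choice here is mine, not necessarily the driver's):

    #include <stdint.h>
    #include <errno.h>

    struct swq_cqe {
            uint64_t bits_type_ts;
            uint8_t  gen;           /* generation bit written by the producer */
    };

    struct swq_cq {
            struct swq_cqe *queue;      /* hardware-written ring */
            struct swq_cqe *sw_queue;   /* driver-generated (e.g. flush) CQEs */
            uint16_t cidx;
            uint16_t sw_cidx;
            uint16_t sw_in_use;         /* count of pending software CQEs */
            uint8_t  gen;               /* generation expected from hardware */
    };

    static int next_hw_cqe(struct swq_cq *cq, struct swq_cqe **cqe)
    {
            struct swq_cqe *hw = &cq->queue[cq->cidx];

            if (hw->gen != cq->gen)     /* nothing new from hardware yet */
                    return -ENODATA;
            *cqe = hw;
            return 0;
    }

    /* Software CQEs are drained before any new hardware completions. */
    static int next_cqe(struct swq_cq *cq, struct swq_cqe **cqe)
    {
            if (cq->sw_in_use) {
                    *cqe = &cq->sw_queue[cq->sw_cidx];
                    return 0;
            }
            return next_hw_cqe(cq, cqe);
    }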
/drivers/infiniband/hw/ipath/
ipath_cq.c  63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
147 if (tail > (u32) cq->ibcq.cqe)
148 tail = (u32) cq->ibcq.cqe;
154 if (tail >= cq->ibcq.cqe)
279 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
283 cq->ibcq.cqe = entries;
371 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
380 if (cqe < 1 || cqe > ib_ipath_max_cqe
[all...]
/drivers/infiniband/hw/qib/
qib_cq.c  63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
146 if (tail > (u32) cq->ibcq.cqe)
147 tail = (u32) cq->ibcq.cqe;
153 if (tail >= cq->ibcq.cqe)
286 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
290 cq->ibcq.cqe = entries;
378 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
387 if (cqe < 1 || cqe > ib_qib_max_cqe
[all...]
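In the ipath and qib CQ code above, ibcq.cqe is the highest valid index rather than the entry count, so the buffer has ibcq.cqe + 1 slots and an index that reaches ibcq.cqe wraps back to 0, which is what the head/tail clamping in those hits is doing. A minimal producer/consumer sketch of that convention:

    #include <stdint.h>

    struct ring_wc {
            uint64_t wr_id;
            uint32_t status;
    };

    struct ring_cq {
            struct ring_wc *queue;  /* cqe + 1 slots */
            uint32_t cqe;           /* highest valid index, as in ibcq.cqe */
            uint32_t head;          /* next slot the producer fills */
            uint32_t tail;          /* next slot the consumer drains */
    };

    /* 0 on success, -1 if the queue would overflow. */
    static int cq_enter(struct ring_cq *cq, const struct ring_wc *wc)
    {
            uint32_t next = (cq->head >= cq->cqe) ? 0 : cq->head + 1;

            if (next == cq->tail)
                    return -1;
            cq->queue[cq->head] = *wc;
            cq->head = next;
            return 0;
    }

    /* 1 if a completion was copied to *wc, 0 if the queue is empty. */
    static int cq_poll_one(struct ring_cq *cq, struct ring_wc *wc)
    {
            if (cq->tail == cq->head)
                    return 0;
            *wc = cq->queue[cq->tail];
            cq->tail = (cq->tail >= cq->cqe) ? 0 : cq->tail + 1;
            return 1;
    }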
/drivers/infiniband/hw/ehca/
ehca_reqs.c  629 struct ehca_cqe *cqe; local
636 cqe = (struct ehca_cqe *)
638 if (!cqe) {
650 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
658 "could not find qp -> ignore cqe",
659 my_cq->cq_number, cqe->local_qp_number);
660 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
661 my_cq->cq_number, cqe->local_qp_number);
662 /* ignore this purged cqe */
[all...]
ehca_cq.c  116 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, argument
134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
196 param.nr_cqe = cqe + additional_cqe;
274 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
381 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
ipz_pt_fn.h  144 struct ehca_cqe *cqe = ipz_qeit_get(queue); local
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
ehca_iverbs.h  129 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
135 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
/drivers/scsi/bnx2i/
bnx2i.h  333 * @num_cqe_rcvd: statistic counter, total cqe's received
447 struct cqe { struct
591 struct cqe *cq_virt;
595 struct cqe *cq_prod_qe;
596 struct cqe *cq_cons_qe;
597 struct cqe *cq_first_qe;
598 struct cqe *cq_last_qe;
715 struct cqe cqe; member in struct:bnx2i_work
820 struct cqe *cq
[all...]
bnx2i_hwi.c  1344 * @cqe: pointer to newly DMA'ed CQE entry for processing
1350 struct cqe *cqe)
1359 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1384 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1430 * @cqe: pointer to newly DMA'ed CQE entry for processing
1436 struct cqe *cqe)
1445 login = (struct bnx2i_login_response *) cqe;
1498 * @cqe
1348 bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1434 bnx2i_process_login_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1502 bnx2i_process_text_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1563 bnx2i_process_tmf_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1602 bnx2i_process_logout_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1648 bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1689 bnx2i_process_nopin_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1741 bnx2i_process_async_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1791 bnx2i_process_reject_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1828 bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1899 bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe) argument
[all...]
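All of the bnx2i_process_*_resp() handlers collected above share the (session, conn, cqe) signature; the completion path reads each DMA'ed CQE and picks a handler by its iSCSI opcode. A skeletal dispatcher in that style — the opcode values, handler names, and types here are placeholders, not the driver's:

    #include <stdint.h>

    /* placeholder opcodes, not the real iSCSI response opcodes */
    enum demo_op {
            DEMO_OP_SCSI_RESP  = 0x21,
            DEMO_OP_LOGIN_RESP = 0x23,
            DEMO_OP_NOPIN      = 0x20,
    };

    struct demo_cqe {
            uint8_t op_code;
            uint8_t payload[63];
    };

    struct demo_conn { int cid; };

    static void demo_scsi_resp(struct demo_conn *c, struct demo_cqe *q)  { (void)c; (void)q; }
    static void demo_login_resp(struct demo_conn *c, struct demo_cqe *q) { (void)c; (void)q; }
    static void demo_nopin(struct demo_conn *c, struct demo_cqe *q)      { (void)c; (void)q; }

    static void demo_process_cqes(struct demo_conn *conn,
                                  struct demo_cqe *ring, int count)
    {
            for (int i = 0; i < count; i++) {
                    struct demo_cqe *cqe = &ring[i];

                    switch (cqe->op_code) {
                    case DEMO_OP_SCSI_RESP:
                            demo_scsi_resp(conn, cqe);
                            break;
                    case DEMO_OP_LOGIN_RESP:
                            demo_login_resp(conn, cqe);
                            break;
                    case DEMO_OP_NOPIN:
                            demo_nopin(conn, cqe);
                            break;
                    default:
                            break;      /* unknown opcode: skip */
                    }
            }
    }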
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c  532 struct mlx4_cqe *cqe; local
550 * reading 'cqe->index' */
552 cqe = &cq->buf[index];
555 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
567 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
571 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
572 ((struct mlx4_err_cqe *) cqe)->syndrome);
575 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
596 length = be32_to_cpu(cqe->byte_cnt);
602 if ((cqe
[all...]
en_tx.c  312 struct mlx4_cqe *cqe; local
325 cqe = &buf[index];
329 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
338 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
352 cqe = &buf[index];
/drivers/net/ethernet/ibm/ehea/
ehea_main.c  534 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) argument
536 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
537 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
539 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
540 (cqe->header_length == 0))
546 struct sk_buff *skb, struct ehea_cqe *cqe,
549 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
556 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
558 skb->csum = csum_unfold(~cqe->inet_checksum_value);
567 struct ehea_cqe *cqe)
545 ehea_fill_skb(struct net_device *dev, struct sk_buff *skb, struct ehea_cqe *cqe, struct ehea_port_res *pr) argument
565 get_skb_by_index(struct sk_buff **skb_array, int arr_len, struct ehea_cqe *cqe) argument
619 ehea_treat_poll_error(struct ehea_port_res *pr, int rq, struct ehea_cqe *cqe, int *processed_rq2, int *processed_rq3) argument
661 struct ehea_cqe *cqe; local
809 struct ehea_cqe *cqe; local
886 struct ehea_cqe *cqe; local
[all...]
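ehea_check_cqe() above packs two decisions into one helper: pull the receive-queue number out of the type field, then classify the CQE by its status error bits, treating a TCP checksum error on a frame with no parsed header as tolerable. A sketch with assumed mask, shift, and return-value conventions (the EHEA_* constants and actual semantics are the driver's own):

    #include <stdint.h>

    #define RXQ_TYPE_RQ_MASK   0x60     /* assumed: RQ number in bits 5-6 */
    #define RXQ_TYPE_RQ_SHIFT  5
    #define RXQ_STAT_ERR_MASK  0x000F   /* assumed error bits */
    #define RXQ_STAT_ERR_TCP   0x0002

    struct rx_cqe {
            uint8_t  type;
            uint16_t status;
            uint16_t header_length;
    };

    /*
     * 0: clean completion; 1: TCP checksum error on a frame with no parsed
     * header (treated as recoverable); -1: hard error.
     */
    static int check_cqe(const struct rx_cqe *cqe, int *rq_num)
    {
            *rq_num = (cqe->type & RXQ_TYPE_RQ_MASK) >> RXQ_TYPE_RQ_SHIFT;
            if ((cqe->status & RXQ_STAT_ERR_MASK) == 0)
                    return 0;
            if ((cqe->status & RXQ_STAT_ERR_TCP) && cqe->header_length == 0)
                    return 1;
            return -1;
    }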
/drivers/block/
nvme.c  204 struct nvme_completion *cqe)
213 cqe->command_id, le16_to_cpup(&cqe->sq_id));
219 cqe->command_id, le16_to_cpup(&cqe->sq_id));
371 struct nvme_completion *cqe)
375 u16 status = le16_to_cpup(&cqe->status) >> 1;
646 struct nvme_completion cqe = nvmeq->cqes[head]; local
647 if ((le16_to_cpu(cqe.status) & 1) != phase)
649 nvmeq->sq_head = le16_to_cpu(cqe
203 special_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
370 bio_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
688 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; local
707 sync_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
1231 static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; local
[all...]
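The nvme.c hits above poll the completion queue with a phase tag: bit 0 of the CQE status word, which the controller inverts on each pass over the queue, so (status & 1) != phase means there is no new entry yet. A host-side sketch of that loop (the struct below is a simplified stand-in for struct nvme_completion, and the doorbell write is only noted in a comment):

    #include <stdint.h>

    struct ncqe {
            uint32_t result;
            uint16_t sq_head;
            uint16_t sq_id;
            uint16_t command_id;
            uint16_t status;        /* bit 0 is the phase tag */
    };

    struct ncq {
            struct ncqe *cqes;      /* in a real driver this is DMA-coherent memory */
            uint16_t depth;
            uint16_t head;
            uint8_t  phase;         /* phase expected for new entries (starts at 1) */
    };

    /* Consume every completion whose phase tag matches the expected phase. */
    static int drain_cq(struct ncq *q,
                        void (*complete)(uint16_t command_id, uint16_t status))
    {
            int seen = 0;

            while ((q->cqes[q->head].status & 1) == q->phase) {
                    struct ncqe cqe = q->cqes[q->head];

                    complete(cqe.command_id, (uint16_t)(cqe.status >> 1));
                    if (++q->head == q->depth) {
                            q->head = 0;
                            q->phase = !q->phase;   /* expected phase flips on wrap */
                    }
                    seen++;
            }
            /* a real driver would now write q->head to the CQ head doorbell */
            return seen;
    }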
/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.c  213 struct eth_end_agg_rx_cqe *cqe)
226 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
229 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
268 const struct eth_fast_path_rx_cqe *cqe)
272 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
273 return le32_to_cpu(cqe->rss_hash_result);
279 struct eth_fast_path_rx_cqe *cqe)
322 le16_to_cpu(cqe->pars_flags.flags);
323 tpa_info->vlan_tag = le16_to_cpu(cqe
211 bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, u16 sge_len, struct eth_end_agg_rx_cqe *cqe) argument
267 bnx2x_get_rxhash(const struct bnx2x *bp, const struct eth_fast_path_rx_cqe *cqe) argument
277 bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, u16 cons, u16 prod, struct eth_fast_path_rx_cqe *cqe) argument
399 bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, u16 pages, struct sk_buff *skb, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx) argument
497 bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, u16 pages, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx) argument
571 bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, struct bnx2x_fastpath *fp) argument
627 union eth_rx_cqe *cqe; local
[all...]

Completed in 425 milliseconds
