Searched defs:cqe (Results 1 - 25 of 35) sorted by relevance

/drivers/infiniband/hw/cxgb3/
iwch_cq.c 40 * 1 cqe returned
48 struct t3_cqe cqe, *rd_cqe; local
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
83 wc->vendor_err = CQE_STATUS(cqe);
88 CQE_QPID(cqe), CQE_TYPE(cqe),
89 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
90 CQE_WRID_LOW(cqe), (unsigne
[all...]
cxio_hal.h 146 struct t3_cqe cqe; /* flits 2-3 */ member in struct:respQ_msg_t
195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
cxio_hal.c 75 struct t3_cqe *cqe; local
105 * Now rptr is the index for the (last) cqe that was
109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
352 struct t3_cqe cqe; local
356 memset(&cqe, 0, sizeof(cqe));
357 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
364 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
389 struct t3_cqe cqe; local
430 struct t3_cqe *cqe, *swcqe; local
446 cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) argument
466 struct t3_cqe *cqe; local
484 struct t3_cqe *cqe; local
1149 cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
[all...]
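
The cxio_hal.c hits above walk the CQ ring with a free-running pointer that Q_PTR2IDX masks down to an array slot before CQ_VLD_ENTRY tests the entry. A minimal sketch of that masking, assuming a power-of-two ring whose size is stored as size_log2 (names below are illustrative, not the driver's):

#include <stdint.h>

/* Illustrative only: a power-of-two ring addressed by a free-running
 * pointer; the low size_log2 bits pick the slot, so the pointer never
 * needs an explicit wrap. */
struct ring {
	uint32_t size_log2;     /* ring holds 1 << size_log2 entries */
	uint32_t rptr;          /* free-running read pointer */
};

static inline uint32_t ptr2idx(uint32_t ptr, uint32_t size_log2)
{
	return ptr & ((1u << size_log2) - 1);   /* same idea as Q_PTR2IDX */
}

/* Example: with size_log2 = 6 (64 entries), rptr = 67 lands in slot 3. */
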
iwch_provider.c 191 chp->ibcq.cqe = 1 << chp->cq.size_log2;
244 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
251 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
254 if (cqe <= cq->cqe)
258 cqe = roundup_pow_of_two(cqe+1);
259 newcq.size_log2 = ilog2(cqe);
262 if (cqe < Q_COUN
[all...]
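
The iwch_resize_cq lines show the requested entry count being bumped by one, rounded up to a power of two, and stored as a log2 size. A hedged sketch of that arithmetic in plain C; the kernel uses roundup_pow_of_two()/ilog2(), the helpers here are stand-ins:

#include <stdint.h>

/* Stand-ins for roundup_pow_of_two()/ilog2(): round a requested CQE
 * count up to a power of two and keep only its log2. */
static uint32_t round_up_pow2(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static uint32_t ilog2_u32(uint32_t pow2)
{
	uint32_t l = 0;

	while (pow2 >>= 1)
		l++;
	return l;
}

/* e.g. a request for 1000 entries becomes round_up_pow2(1000 + 1) = 1024
 * slots, size_log2 = 10; note the snippet rounds cqe + 1, not cqe. */
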
/drivers/infiniband/hw/ehca/
ehca_cq.c 116 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, argument
134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
196 param.nr_cqe = cqe + additional_cqe;
274 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
381 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
ipz_pt_fn.h 144 struct ehca_cqe *cqe = ipz_qeit_get(queue); local
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
ehca_reqs.c 629 struct ehca_cqe *cqe; local
636 cqe = (struct ehca_cqe *)
638 if (!cqe) {
650 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
658 "could not find qp -> ignore cqe",
659 my_cq->cq_number, cqe->local_qp_number);
660 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
661 my_cq->cq_number, cqe->local_qp_number);
662 /* ignore this purged cqe */
[all...]
/drivers/infiniband/hw/ipath/
ipath_cq.c 63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
147 if (tail > (u32) cq->ibcq.cqe)
148 tail = (u32) cq->ibcq.cqe;
154 if (tail >= cq->ibcq.cqe)
279 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
283 cq->ibcq.cqe = entries;
371 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
380 if (cqe < 1 || cqe > ib_ipath_max_cqe
[all...]
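
In the ipath (and, below, qib) software CQ code, ibcq.cqe is the highest valid index, so the ring really holds cqe + 1 slots; the clamp-and-wrap on head and tail seen above can be sketched like this (illustrative names, not the driver's):

#include <stdint.h>

/* Illustrative index handling for a software CQ where 'cqe' is the last
 * valid index rather than the slot count. */
struct soft_cq {
	uint32_t cqe;    /* highest valid index; ring holds cqe + 1 slots */
	uint32_t head;   /* producer index */
	uint32_t tail;   /* consumer index */
};

static uint32_t next_index(const struct soft_cq *cq, uint32_t idx)
{
	return (idx >= cq->cqe) ? 0 : idx + 1;
}

static int cq_is_full(const struct soft_cq *cq)
{
	/* full when advancing head would collide with tail */
	return next_index(cq, cq->head) == cq->tail;
}
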
/drivers/infiniband/hw/qib/
qib_cq.c 63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
146 if (tail > (u32) cq->ibcq.cqe)
147 tail = (u32) cq->ibcq.cqe;
153 if (tail >= cq->ibcq.cqe)
286 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
290 cq->ibcq.cqe = entries;
378 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
387 if (cqe < 1 || cqe > ib_qib_max_cqe
[all...]
/drivers/infiniband/hw/cxgb4/
cq.c 179 struct t4_cqe cqe; local
183 memset(&cqe, 0, sizeof(cqe));
184 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
189 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
190 cq->sw_queue[cq->sw_pidx] = cqe;
212 struct t4_cqe cqe; local
216 memset(&cqe, 0, sizeof(cqe));
217 cqe
251 struct t4_cqe *cqe = NULL, *swcqe; local
268 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
286 struct t4_cqe *cqe; local
305 struct t4_cqe *cqe; local
402 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
568 struct t4_cqe cqe = {0, 0}, *rd_cqe; local
879 c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
[all...]
t4.h 185 /* macros for flit 0 of the cqe */
239 /* macros for flit 3 of the cqe */
262 struct t4_cqe cqe; member in struct:t4_swsqe
521 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) argument
523 return (CQE_GENBIT(cqe) == cq->gen);
526 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) argument
541 *cqe = &cq->queue[cq->cidx];
555 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) argument
562 *cqe = &cq->sw_queue[cq->sw_cidx];
564 ret = t4_next_hw_cqe(cq, cqe);
[all...]
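
The t4.h hits show the two ideas behind cxgb4 CQ polling: t4_valid_cqe accepts an entry only while its generation bit equals the CQ's current gen (the bit flips on every wrap), and t4_next_cqe drains the software queue before touching the hardware queue. A compact, hedged sketch of the gen-bit test with invented names; a real driver would also put a read barrier between the gen check and reading the payload:

#include <stddef.h>
#include <stdint.h>

/* Illustrative generation-bit polling: the producer toggles 'gen' each
 * time it wraps, so the consumer can tell fresh entries from stale ones
 * left over from the previous pass. */
struct entry {
	uint64_t payload;
	uint8_t  gen;            /* written last by the producer */
};

struct genring {
	struct entry *queue;
	uint32_t size;
	uint32_t cidx;           /* consumer index */
	uint8_t  gen;            /* generation the consumer expects */
};

static struct entry *next_valid(struct genring *cq)
{
	struct entry *e = &cq->queue[cq->cidx];

	if (e->gen != cq->gen)
		return NULL;             /* nothing new yet */
	if (++cq->cidx == cq->size) {    /* consume, wrap, flip expectation */
		cq->cidx = 0;
		cq->gen ^= 1;
	}
	return e;
}
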
/drivers/infiniband/hw/mlx4/
cq.c 79 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
81 return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
82 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
130 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) argument
132 mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
137 u64 buf_addr, int cqe)
141 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
183 cq->ibcq.cqe
135 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) argument
329 struct mlx4_cqe *cqe, *new_cqe; local
462 dump_cqe(void *cqe) argument
472 mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) argument
551 struct mlx4_cqe *cqe; local
776 struct mlx4_cqe *cqe, *dest; local
[all...]
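
The get_cqe/get_sw_cqe lines show mlx4's ownership trick: ibcq.cqe is a power-of-two-minus-one mask, the consumer counter n runs freely, and the XOR above returns NULL unless the entry's owner bit matches the pass parity of n (bit log2(size)). A hedged sketch with generic names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative mlx4-style ownership test: 'mask' is size - 1 for a
 * power-of-two ring and 'n' is a free-running consumer counter.  The
 * snippet above treats an entry as ready exactly when its owner bit
 * equals the pass parity of n. */
#define OWNER_BIT 0x80   /* stand-in for MLX4_CQE_OWNER_MASK */

struct swcqe {
	uint8_t owner_sr_opcode;
	/* ... rest of the entry ... */
};

static struct swcqe *get_ready_cqe(struct swcqe *ring, uint32_t mask,
				   uint32_t n)
{
	struct swcqe *e = &ring[n & mask];
	bool owner = e->owner_sr_opcode & OWNER_BIT;
	bool pass  = n & (mask + 1);         /* parity of the current pass */

	return (owner ^ pass) ? NULL : e;    /* mismatch => not ours yet */
}
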
mlx4_ib.h 73 int cqe; member in struct:mlx4_ib_cq_resize
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c 532 struct mlx4_cqe *cqe; local
550 * reading 'cqe->index' */
552 cqe = &cq->buf[index];
555 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
567 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
571 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
572 ((struct mlx4_err_cqe *) cqe)->syndrome);
575 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
596 length = be32_to_cpu(cqe->byte_cnt);
602 if ((cqe
[all...]
en_tx.c 312 struct mlx4_cqe *cqe; local
325 cqe = &buf[index];
329 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
338 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
352 cqe = &buf[index];
/drivers/infiniband/core/
verbs.c 909 void *cq_context, int cqe, int comp_vector)
913 cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
944 int ib_resize_cq(struct ib_cq *cq, int cqe) argument
947 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
906 ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), void *cq_context, int cqe, int comp_vector) argument
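
The core verbs.c hits are the dispatch layer: ib_create_cq and ib_resize_cq forward to per-device function pointers, and ib_resize_cq returns -ENOSYS when a driver leaves resize_cq unimplemented. A stripped-down sketch of that optional-op pattern with generic names (not the real ib_verbs types):

#include <errno.h>

/* Illustrative optional-operation dispatch: the core calls a driver hook
 * when it exists and fails cleanly when it does not. */
struct dev_ops {
	int (*resize_cq)(void *cq, int cqe);   /* may be left NULL */
};

struct device {
	const struct dev_ops *ops;
};

static int core_resize_cq(struct device *dev, void *cq, int cqe)
{
	return dev->ops->resize_cq ? dev->ops->resize_cq(cq, cqe)
				   : -ENOSYS;
}
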
/drivers/atm/
firestream.c 597 struct FS_QENTRY *cqe; local
609 cqe = bus_to_virt (wp);
610 if (qe != cqe) {
611 fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe);
/drivers/infiniband/hw/mthca/
mthca_cq.c 174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
191 __be32 *cqe = cqe_ptr; local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[
269 is_recv_cqe(struct mthca_cqe *cqe) argument
281 struct mthca_cqe *cqe; local
316 cqe, MTHCA_CQ_ENTRY_SIZE); local
371 mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) argument
377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
490 struct mthca_cqe *cqe; local
[all...]
mthca_provider.h 193 int cqe; member in struct:mthca_cq_resize
/drivers/scsi/bnx2fc/
bnx2fc_hwi.c 1010 struct fcoe_cqe *cqe; local
1029 cqe = &cq[cq_cons];
1031 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1064 cqe++;
1070 cqe = cq;
/drivers/block/
nvme.c 204 struct nvme_completion *cqe)
213 cqe->command_id, le16_to_cpup(&cqe->sq_id));
219 cqe->command_id, le16_to_cpup(&cqe->sq_id));
371 struct nvme_completion *cqe)
375 u16 status = le16_to_cpup(&cqe->status) >> 1;
646 struct nvme_completion cqe = nvmeq->cqes[head]; local
647 if ((le16_to_cpu(cqe.status) & 1) != phase)
649 nvmeq->sq_head = le16_to_cpu(cqe
203 special_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
370 bio_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
688 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; local
707 sync_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) argument
1231 static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; local
[all...]
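
The nvme.c hits show the NVMe variant of the same validity test: bit 0 of the completion status is a phase tag, and the completion handler keeps consuming entries while that bit matches the queue's expected phase, flipping the expectation each time the head wraps. A small sketch of that loop, assuming illustrative field names and ignoring endianness:

#include <stdint.h>

/* Illustrative NVMe-style phase-bit polling: bit 0 of 'status' flips on
 * every pass of the completion ring, so the consumer only tracks its
 * head index and the phase it currently expects. */
struct completion {
	uint16_t status;       /* bit 0 = phase tag, written last */
	uint16_t command_id;
};

struct cqueue {
	struct completion *cqes;
	uint16_t depth;
	uint16_t head;
	uint8_t  phase;        /* phase value the consumer expects */
};

static int reap_one(struct cqueue *q, struct completion *out)
{
	struct completion cqe = q->cqes[q->head];

	if ((cqe.status & 1) != q->phase)
		return 0;                    /* nothing new */
	*out = cqe;
	if (++q->head == q->depth) {         /* wrap and flip phase */
		q->head = 0;
		q->phase ^= 1;
	}
	return 1;
}
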
/drivers/infiniband/hw/nes/
nes_verbs.c 1438 /* Zero the context value so cqe will be ignored */
1563 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
3638 struct nes_hw_cqe cqe; local
3666 cqe = nescq->hw_cq.cq_vbase[head];
3667 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3671 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
3677 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
3680 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
3684 /* The rest of the cqe's will be marked as flushed */
3695 if (le32_to_cpu(cqe
[all...]
/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_ethtool.c 1836 union eth_rx_cqe *cqe; local
1967 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
1968 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1973 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
1981 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
/drivers/scsi/bnx2i/
bnx2i_hwi.c 1344 * @cqe: pointer to newly DMA'ed CQE entry for processing
1350 struct cqe *cqe)
1359 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1384 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1430 * @cqe: pointer to newly DMA'ed CQE entry for processing
1436 struct cqe *cqe)
1445 login = (struct bnx2i_login_response *) cqe;
1498 * @cqe
1348 bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1434 bnx2i_process_login_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1502 bnx2i_process_text_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1563 bnx2i_process_tmf_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1602 bnx2i_process_logout_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1648 bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1689 bnx2i_process_nopin_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1741 bnx2i_process_async_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1791 bnx2i_process_reject_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1828 bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1899 bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe) argument
[all...]
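
Every bnx2i match above is a per-opcode CQE handler with the same (session, connection, cqe) shape; the driver's completion loop decodes the opcode from the raw CQE and fans out to one of them. A generic sketch of that fan-out, with invented opcode values and handler names:

#include <stddef.h>
#include <stdint.h>

/* Illustrative opcode fan-out: all handlers share one signature, so the
 * completion loop can dispatch through a table keyed by opcode. */
struct cqe  { uint8_t opcode; uint8_t data[63]; };
struct conn;                         /* opaque connection context */

typedef int (*cqe_handler_t)(struct conn *c, struct cqe *cqe);

static int handle_cmd_resp(struct conn *c, struct cqe *cqe)   { (void)c; (void)cqe; return 0; }
static int handle_login_resp(struct conn *c, struct cqe *cqe) { (void)c; (void)cqe; return 0; }

static const cqe_handler_t handlers[] = {
	[0x01] = handle_cmd_resp,        /* opcode values are placeholders */
	[0x03] = handle_login_resp,
};

static int dispatch_cqe(struct conn *c, struct cqe *cqe)
{
	size_t n = sizeof(handlers) / sizeof(handlers[0]);
	cqe_handler_t h = (cqe->opcode < n) ? handlers[cqe->opcode] : NULL;

	return h ? h(c, cqe) : -1;       /* unknown opcode */
}
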
/drivers/scsi/lpfc/
lpfc_sli.h 45 } cqe; member in struct:lpfc_cq_event

Completed in 1629 milliseconds
