Searched refs:qp (Results 76 - 100 of 172) sorted by relevance


/drivers/crypto/
n2_core.c
233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
236 qp->head != qp->tail)
477 static unsigned long wait_for_tail(struct spu_queue *qp) argument
482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
487 if (head == qp->tail) {
488 qp->head = head;
495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, argument
498 unsigned long hv_ret = spu_queue_submit(qp, ent);
501 hv_ret = wait_for_tail(qp);
514 struct spu_queue *qp; local
826 __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, struct spu_queue *qp, bool encrypt) argument
978 struct spu_queue *qp; local
1030 struct spu_queue *qp; local
[all...]
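
The n2_core.c hits show the driver's submit-then-poll pattern: after queueing work, wait_for_tail() re-reads the hardware head pointer until it meets the software tail. A minimal stand-alone sketch of the same idea (struct fake_queue and hv_read_head() are inventions standing in for the spu_queue and the sun4v hypervisor call):

/* Wait until the device-side head catches up with our tail, i.e. the
 * hardware has consumed every submitted entry. */
struct fake_queue {
        unsigned long head;     /* last entry the device consumed */
        unsigned long tail;     /* next entry software would fill */
};

/* Stand-in for the hypervisor head query (sun4v_ncs_gethead). */
static unsigned long hv_read_head(const struct fake_queue *q)
{
        return q->head;         /* a real device advances this as it works */
}

static int wait_for_tail(struct fake_queue *q)
{
        unsigned long retries = 1000000;

        while (retries--) {
                if (hv_read_head(q) == q->tail)
                        return 0;       /* queue fully drained */
                /* a real driver would cpu_relax() or sleep here */
        }
        return -1;                      /* gave up: device stalled */
}
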
/drivers/infiniband/hw/cxgb3/
iwch_cm.c
681 ep->com.qp = NULL;
713 ep->com.qp = NULL;
742 ep->com.qp = NULL;
925 err = iwch_modify_qp(ep->com.qp->rhp,
926 ep->com.qp, mask, &attrs, 1);
930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
1497 if (ep->com.cm_id && ep->com.qp) {
1499 iwch_modify_qp(ep->com.qp
1801 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); local
[all...]
iwch_provider.h
182 void iwch_qp_add_ref(struct ib_qp *qp);
183 void iwch_qp_rem_ref(struct ib_qp *qp);
331 int iwch_bind_mw(struct ib_qp *qp,
/drivers/infiniband/ulp/iser/
iser_verbs.c
59 iser_err("got qp event %d\n",cause->event);
471 ib_conn->qp = ib_conn->cma_id->qp;
472 iser_info("setting conn %p cma_id %p qp %p\n",
474 ib_conn->cma_id->qp);
584 iser_info("freeing conn %p cma_id %p qp %p\n",
585 iser_conn, ib_conn->cma_id, ib_conn->qp);
589 if (ib_conn->qp != NULL) {
592 ib_conn->qp = NULL;
664 err = ib_post_send(ib_conn->qp,
[all...]
/drivers/infiniband/hw/amso1100/
c2.h
489 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
490 extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
492 extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
494 extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
516 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
c2_intr.c
185 c2_set_qp_state(req->qp,
c2_vq.c
113 r->qp = NULL;
/drivers/scsi/
qlogicpti.h
504 #define for_each_qlogicpti(qp) \
505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
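
for_each_qlogicpti is the classic walk of a NULL-terminated singly linked chain wrapped in a macro. A self-contained sketch of the construct (struct and names are illustrative, not the driver's):

#include <stdio.h>

struct qpti {
        int unit;
        struct qpti *next;
};

/* Iterate qp over every element of the chain. */
#define for_each_qpti(chain, qp) \
        for ((qp) = (chain); (qp); (qp) = (qp)->next)

int main(void)
{
        struct qpti c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
        struct qpti *qp;

        for_each_qpti(&a, qp)
                printf("unit %d\n", qp->unit);
        return 0;
}
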
/drivers/infiniband/core/
uverbs_main.c
185 static void ib_uverbs_detach_umcast(struct ib_qp *qp, argument
191 ib_detach_mcast(qp, &mcast->gid, mcast->lid);
233 struct ib_qp *qp = uobj->object; local
238 if (qp != qp->real_qp) {
239 ib_close_qp(qp);
241 ib_uverbs_detach_umcast(qp, uqp);
242 ib_destroy_qp(qp);
506 /* for XRC target qp's, check that qp i
[all...]
mad.c
327 if (!port_priv->qp_info[qpn].qp) {
341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
364 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
532 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
702 static void build_smp_wc(struct ib_qp *qp, argument
713 wc->qp = qp;
782 build_smp_wc(mad_agent_priv->agent.qp,
1245 ib_redirect_mad_qp(struct ib_qp *qp, u8 rmpp_version, ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler, void *context) argument
2757 struct ib_qp *qp; local
[all...]
cma.c
520 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) argument
530 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
535 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
541 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
546 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) argument
556 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
563 struct ib_qp *qp; local
570 qp = ib_create_qp(pd, qp_init_attr);
571 if (IS_ERR(qp))
572 return PTR_ERR(qp);
[all...]
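
cma_init_ud_qp() above walks a freshly created UD QP through the RESET -> INIT -> RTR -> RTS ladder with three ib_modify_qp() calls, exactly as the match lines show. A hedged sketch of that ladder (the function name is ours; in the real code rdma_init_qp_attr() fills qp_attr and qp_attr_mask for the INIT step):

#include <rdma/ib_verbs.h>

static int init_ud_qp_sketch(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                             int qp_attr_mask)
{
        int ret;

        qp_attr->qp_state = IB_QPS_INIT;
        ret = ib_modify_qp(qp, qp_attr, qp_attr_mask);  /* RESET -> INIT */
        if (ret)
                return ret;

        qp_attr->qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, qp_attr, IB_QP_STATE);   /* INIT -> RTR */
        if (ret)
                return ret;

        qp_attr->qp_state = IB_QPS_RTS;
        qp_attr->sq_psn = 0;
        return ib_modify_qp(qp, qp_attr,                /* RTR -> RTS */
                            IB_QP_STATE | IB_QP_SQ_PSN);
}
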
/drivers/infiniband/hw/ocrdma/
ocrdma_main.c
560 struct ocrdma_qp *qp, **cur_qp; local
570 qp = cur_qp[i];
571 if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
573 _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
576 err_event.element.qp = &qp->ibqp;
ocrdma_verbs.h
75 void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
/drivers/infiniband/hw/cxgb4/
resource.c
43 rdev->lldi.vr->qp.start,
44 rdev->lldi.vr->qp.size,
45 rdev->lldi.vr->qp.size, 0))
48 for (i = rdev->lldi.vr->qp.start;
49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
126 * now put the same ids on the qp list since they all
cm.c
153 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
160 c4iw_qp_add_ref(&ep->com.qp->ibqp);
1456 err = c4iw_modify_qp(ep->com.qp->rhp,
1457 ep->com.qp, mask, &attrs, 1);
1471 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1491 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1687 BUG_ON(!ep->com.qp);
1691 __func__, ep->com.qp
2768 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); local
[all...]
/drivers/media/pci/solo6x10/
solo6x10-enc.c
182 unsigned int qp)
187 if ((ch > 31) || (qp > 3))
206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch;
181 solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, unsigned int qp) argument
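
solo_s_jpeg_qp() packs a 2-bit JPEG quantization value per channel into 32-bit words, rejecting ch > 31 or qp > 3. A generic sketch of that packing (the idx/shift arithmetic here is our assumption about the layout; the driver's may differ):

#include <stdint.h>

/* One 2-bit field per channel, sixteen channels per 32-bit word. */
static void set_jpeg_qp(uint32_t *words, unsigned int ch, unsigned int qp)
{
        unsigned int idx = ch / 16;
        unsigned int shift = (ch % 16) * 2;

        if (ch > 31 || qp > 3)
                return;                         /* out of range */
        words[idx] &= ~(3u << shift);           /* clear the old value */
        words[idx] |= (qp & 3u) << shift;       /* install the new one */
}
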
/drivers/infiniband/hw/mlx5/
main.c
1035 mlx5_ib_destroy_qp(dev->umrc.qp);
1051 struct ib_qp *qp; local
1092 qp = mlx5_ib_create_qp(pd, init_attr, NULL);
1093 if (IS_ERR(qp)) {
1095 ret = PTR_ERR(qp);
1098 qp->device = &dev->ib_dev;
1099 qp->real_qp = qp;
1100 qp->uobject = NULL;
1101 qp
[all...]
/drivers/infiniband/hw/mthca/
mthca_eq.c
144 } __attribute__((packed)) qp; member in union:mthca_eqe::__anon1540
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
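
Every branch above recovers a 24-bit QP number from a big-endian event field with be32_to_cpu(...) & 0xffffff. The same decode in portable stand-alone form:

#include <stdint.h>

/* Extract a 24-bit QPN from a big-endian 32-bit field in memory. */
static uint32_t qpn_from_be32(const uint8_t *p)
{
        uint32_t v = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                     ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];

        return v & 0xffffff;    /* the top byte carries other event bits */
}
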
/drivers/infiniband/hw/ehca/
ehca_qp.c
58 * attributes not supported by query qp
64 * ehca (internal) qp state values
77 * qp state transitions as defined by IB Arch Rel 1.1 page 431
96 * returns ehca qp state corresponding to given ib qp state
123 * returns ib qp state corresponding to given ehca qp state
163 * returns ehca qp type corresponding to ib qp type
238 * ib qp typ
400 ehca_add_to_err_list(struct ehca_qp *qp, int on_sq) argument
1887 ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) argument
2228 ehca_destroy_qp(struct ib_qp *qp) argument
[all...]
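
The ehca comments describe tables translating between IB-architecture QP states and the adapter's internal states. A minimal sketch of such a mapping (the enum values are ours; ehca's real tables also encode the legal transitions from IB Arch Rel 1.1, page 431):

/* IB verbs QP states (subset) and a hypothetical internal encoding. */
enum ib_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_SQE, QPS_ERR };
enum hw_state { HW_RESET, HW_INIT, HW_RTR, HW_RTS, HW_SQD, HW_SQE, HW_ERR };

/* One-way translation: IB state -> internal hardware state. */
static const enum hw_state ib2hw[] = {
        [QPS_RESET] = HW_RESET,
        [QPS_INIT]  = HW_INIT,
        [QPS_RTR]   = HW_RTR,
        [QPS_RTS]   = HW_RTS,
        [QPS_SQD]   = HW_SQD,
        [QPS_SQE]   = HW_SQE,
        [QPS_ERR]   = HW_ERR,
};
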
/drivers/infiniband/hw/mlx4/
main.c
51 #include <linux/mlx4/qp.h>
207 props->max_qp = dev->dev->quotas.qp;
922 static int __mlx4_ib_default_rules_match(struct ib_qp *qp, argument
928 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
978 struct ib_qp *qp,
1014 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, argument
1022 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1056 ctrl->qpn = cpu_to_be32(qp->qp_num);
1061 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1064 mdev, qp, default_tabl
976 __mlx4_ib_create_default_rules(struct mlx4_ib_dev *mdev, struct ib_qp *qp, const struct default_rules *pdefault_rules, struct _rule_hw *mlx4_spec) argument
1109 mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, u64 *reg_id) argument
1133 mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) argument
1263 find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) argument
1690 struct mlx4_ib_qp *qp; local
[all...]
cq.c
35 #include <linux/mlx4/qp.h>
566 static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, argument
571 ib_dma_sync_single_for_cpu(qp->ibqp.device,
572 qp->sqp_proxy_rcv[tail].map,
575 hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
670 wc->qp = &(*cur_qp)->ibqp;
672 if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
790 is_eth = (rdma_port_get_link_layer(wc->qp->device,
/drivers/atm/
firestream.c
631 static int qp; variable
651 pq[qp].cmd = cmd;
652 pq[qp].p0 = p1;
653 pq[qp].p1 = p2;
654 pq[qp].p2 = p3;
655 qp++;
656 if (qp >= 60) qp = 0;
1950 i, pq[qp].cmd, pq[qp]
[all...]
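
firestream.c keeps a fixed ring of the last 60 submitted commands purely for post-mortem dumps: a static index qp that wraps to zero. The same pattern in isolation (the struct fields are invented for the sketch):

#define LOG_DEPTH 60

struct cmd_log { unsigned int cmd, p0, p1, p2; };

static struct cmd_log pq[LOG_DEPTH];
static int qp;                          /* next free slot in the ring */

static void log_cmd(unsigned int cmd, unsigned int p0,
                    unsigned int p1, unsigned int p2)
{
        pq[qp].cmd = cmd;
        pq[qp].p0 = p0;
        pq[qp].p1 = p1;
        pq[qp].p2 = p2;
        if (++qp >= LOG_DEPTH)          /* wrap; oldest entry is overwritten */
                qp = 0;
}
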
/drivers/scsi/sym53c8xx_2/
sym_hipd.c
1542 SYM_QUEHEAD *qp; local
1556 qp = sym_remque_head(&lp->waiting_ccbq);
1557 if (!qp)
1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
1563 sym_insque_head(qp, &lp->waiting_ccbq);
1572 sym_insque_head(qp, &lp->waiting_ccbq);
1581 sym_insque_tail(qp, &lp->started_ccbq);
1643 SYM_QUEHEAD *qp; local
1646 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
1648 cp = sym_que_entry(qp, struc
1947 SYM_QUEHEAD *qp; local
3200 SYM_QUEHEAD qtmp, *qp; local
3283 SYM_QUEHEAD *qp; local
4657 SYM_QUEHEAD *qp; local
5333 SYM_QUEHEAD *qp; local
5805 SYM_QUEHEAD *qp; local
[all...]
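
SYM_QUEHEAD is a circular doubly linked queue; the hits show CCBs being remque'd from one list (waiting_ccbq, comp_ccbq) and insque'd onto another (started_ccbq). A compact sketch of that queue type (semantics reconstructed, not copied from the driver):

#include <stddef.h>

struct quehead {
        struct quehead *flink, *blink;  /* forward / backward links */
};

static void que_init(struct quehead *h)
{
        h->flink = h->blink = h;        /* empty queue points at itself */
}

static void insque_tail(struct quehead *e, struct quehead *h)
{
        e->flink = h;                   /* new element sits just before */
        e->blink = h->blink;            /* the sentinel, i.e. at the tail */
        h->blink->flink = e;
        h->blink = e;
}

static struct quehead *remque_head(struct quehead *h)
{
        struct quehead *e = h->flink;

        if (e == h)
                return NULL;            /* queue is empty */
        h->flink = e->flink;
        e->flink->blink = h;
        return e;
}
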
/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
37 #include <linux/mlx4/qp.h>
116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
118 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
121 ring->qp.event = mlx4_en_sqp_event;
171 mlx4_qp_remove(mdev->dev, &ring->qp);
172 mlx4_qp_free(mdev->dev, &ring->qp);
198 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
207 &ring->qp, &ring->qp_state);
221 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
/drivers/net/ethernet/sun/
sunqe.c
937 struct sunqe *qp = platform_get_drvdata(op); local
938 struct net_device *net_dev = qp->dev;
942 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
943 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
945 qp->qe_block, qp->qblock_dvma);
947 qp->buffers, qp->buffers_dvma);

Completed in 539 milliseconds
