/drivers/net/ethernet/mellanox/mlx5/core/

mcg.c
    42: __be32 qpn;    /* member of struct mlx5_attach_mcg_mbox_in */
    54: __be32 qpn;    /* member of struct mlx5_detach_mcg_mbox_in */
    64: int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
    74: in.qpn = cpu_to_be32(qpn);
    86: int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
    96: in.qpn = cpu_to_be32(qpn);

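Both command paths here do the same thing with qpn: copy a CPU-order QPN into a big-endian mailbox word (lines 74 and 96) before handing the mailbox to firmware. A minimal userspace sketch of that byte-order step, using htonl() as a stand-in for the kernel's cpu_to_be32() and a hypothetical one-field mailbox modeled on the struct members above:

    #include <assert.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Hypothetical mailbox modeled on the struct members listed above:
     * firmware expects the QPN as a big-endian 32-bit word. */
    struct attach_mcg_mbox_in {
        uint32_t qpn;              /* plays the role of the __be32 qpn member */
    };

    int main(void)
    {
        uint32_t qpn = 0xABCDEF;   /* QPNs are 24-bit values */
        struct attach_mcg_mbox_in in;

        in.qpn = htonl(qpn);       /* userspace stand-in for cpu_to_be32() */

        /* Whatever the host byte order, the wire word decodes back to qpn. */
        assert(ntohl(in.qpn) == qpn);
        return 0;
    }
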
qp.c
    117: qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
    118: mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
    122: err = radix_tree_insert(&table->tree, qp->qpn, qp);
    132: qp->qpn);
    145: din.qpn = cpu_to_be32(qp->qpn);
    164: radix_tree_delete(&table->tree, qp->qpn);
    173: in.qpn
    ... (more matches not shown)

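Lines 117-122 are the core pattern of this file: the firmware reply carries the new QPN big-endian, the driver masks it down to 24 bits, and the result becomes the key under which the QP is filed in a radix tree (and removed again at teardown, line 164). A rough userspace sketch of that extract-and-index pattern; the open-addressing table below is only a stand-in for the kernel radix tree, and all names are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>
    #include <arpa/inet.h>

    #define QPN_MASK   0xFFFFFF    /* QPNs are 24-bit identifiers */
    #define TABLE_SIZE 1024        /* sketch-sized; the kernel uses a radix tree */

    struct qp { uint32_t qpn; };

    static struct qp *table[TABLE_SIZE];

    static int qp_insert(struct qp *qp)            /* radix_tree_insert() stand-in */
    {
        unsigned idx = qp->qpn % TABLE_SIZE;
        for (unsigned i = 0; i < TABLE_SIZE; i++) {
            unsigned slot = (idx + i) % TABLE_SIZE;
            if (!table[slot]) { table[slot] = qp; return 0; }
        }
        return -1;                                 /* table full */
    }

    static struct qp *qp_lookup(uint32_t qpn)      /* radix_tree_lookup() stand-in */
    {
        unsigned idx = qpn % TABLE_SIZE;
        for (unsigned i = 0; i < TABLE_SIZE; i++) {
            unsigned slot = (idx + i) % TABLE_SIZE;
            if (table[slot] && table[slot]->qpn == qpn)
                return table[slot];
        }
        return NULL;
    }

    int main(void)
    {
        uint32_t out_qpn_be = htonl(0x12345678);   /* fake firmware reply word */
        struct qp qp;

        qp.qpn = ntohl(out_qpn_be) & QPN_MASK;     /* be32_to_cpu(out.qpn) & 0xffffff */
        qp_insert(&qp);
        printf("qpn = 0x%x, lookup %s\n", qp.qpn,
               qp_lookup(qp.qpn) ? "hit" : "miss");
        return 0;
    }
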
/drivers/net/ethernet/mellanox/mlx4/

qp.c
    45: void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
    52: qp = __mlx4_qp_lookup(dev, qpn);
    59: mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
    75: *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
    77: *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
    78: qp->qpn <= dev->phys_caps.base_sqpn + 1;
    141: ret = mlx4_cmd(dev, 0, qp->qpn, 2,
    146: port = (qp->qpn & 1) + 1;
    170: cpu_to_be32(qp->qpn);
    275: __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
    319: mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
    332: __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
    344: mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
    358: mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
    393: mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params)
    ... (more matches not shown)

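Two details in this entry are easy to miss: for the special QPs, "QP0-ness" is a range check against base_sqpn (lines 77-78), and the port a special QP belongs to is encoded in its low bit (line 146: port = (qpn & 1) + 1). A small sketch of both checks, with a made-up base_sqpn value standing in for the firmware-reported one:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative only: base_sqpn comes from firmware caps in the real driver. */
    struct phys_caps { uint32_t base_sqpn; };

    /* The QP0 pair sits at base_sqpn..base_sqpn+1, one QP per port. */
    static bool is_real_qp0(const struct phys_caps *caps, uint32_t qpn)
    {
        return qpn >= caps->base_sqpn && qpn <= caps->base_sqpn + 1;
    }

    /* For special QPs the low bit selects the port, numbered from 1. */
    static int qp_port(uint32_t qpn)
    {
        return (qpn & 1) + 1;
    }

    int main(void)
    {
        struct phys_caps caps = { .base_sqpn = 0x40 };   /* assumed value */

        for (uint32_t qpn = 0x40; qpn <= 0x41; qpn++)
            printf("qpn 0x%x: real_qp0=%d port=%d\n",
                   qpn, is_real_qp0(&caps, qpn), qp_port(qpn));
        return 0;
    }
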
mcg.c
    124: get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn)
    137: if (pqp->qpn == qpn)
    148: new_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn)
    174: /* If the given qpn is also a promisc qp, */
    177: pqp = get_promisc_qp(dev, port, steer, qpn);
    184: dqp->qpn = qpn;
    209: /* don't add already existing qpn */
    210: if (pqp->qpn ...
    240: existing_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn)
    289: check_duplicate_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn)
    356: u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
    419: add_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn)
    549: remove_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn)
    1027: mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, int port, int qpn, u16 prio, u64 *reg_id)
    1315: int qpn;
    1435: mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode)
    1528: u32 qpn = (u32) vhcr->in_param & 0xffffffff;
    1545: mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, enum mlx4_steer_type steer, u8 add, u8 port)
    1553: mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
    1562: mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
    1571: mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
    1580: mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
    ... (more matches not shown)

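Line 356 shows how member QPNs are stored inside a multicast group (MGM) entry: big-endian words whose low bits carry the QPN, extracted with MGM_QPN_MASK. The duplicate check around lines 209-210 is then a linear scan over those words. A sketch of that scan, assuming MGM_QPN_MASK covers the low 24 bits and reducing the MGM entry to the one array that matters here:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define MGM_QPN_MASK 0x00FFFFFF   /* assumed: QPN lives in the low 24 bits */
    #define MGM_MAX_QPS  8            /* sketch-sized; real MGM entries hold more */

    /* Reduced MGM entry: big-endian words whose low 24 bits are member QPNs. */
    struct mgm { uint32_t qp[MGM_MAX_QPS]; int members; };

    /* Return true if qpn is already a member of this MGM entry. */
    static bool mgm_has_qpn(const struct mgm *mgm, uint32_t qpn)
    {
        for (int i = 0; i < mgm->members; i++)
            if ((ntohl(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
                return true;
        return false;
    }

    int main(void)
    {
        struct mgm mgm = { .members = 2 };
        mgm.qp[0] = htonl(0x000111);
        mgm.qp[1] = htonl(0x000222);

        printf("0x111 member? %d\n", mgm_has_qpn(&mgm, 0x111));
        printf("0x333 member? %d\n", mgm_has_qpn(&mgm, 0x333));
        return 0;
    }
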
en_resources.c
    40: mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context)
    58: context->local_qpn = cpu_to_be32(qpn);
    74: en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);

resource_tracker.c
    221: int qpn;    /* member of struct res_fs_rule */
    698: update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, u8 slave, u32 qpn)
    717: if (mlx4_is_qp_reserved(dev, qpn))
    731: mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
    974: static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
    984: ret->qpn = qpn;
    1257: static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
    1267: r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
    1527: static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
    1533: fw_reserved(struct mlx4_dev *dev, int qpn)
    1545: int qpn;
    2112: int qpn;
    2682: u32 qpn = vhcr->in_modifier & 0xffffff;
    2699: int qpn = vhcr->in_modifier & 0x7fffff;
    2905: u32 qpn;
    3441: int qpn = vhcr->in_modifier & 0x7fffff;
    3530: int qpn = vhcr->in_modifier & 0x7fffff;
    3695: int qpn = vhcr->in_modifier & 0x7fffff;
    3851: int qpn;
    3992: u32 qpn = vhcr->in_modifier & 0xffffff;
    4053: int qpn;
    4273: int qpn;
    ... (more matches not shown)

en_netdev.c
    189: rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
    473: mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, int qpn, u64 *reg_id)
    481: err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
    492: mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, unsigned char *mac, int *qpn, u64 *reg_id)
    504: qp.qpn = *qpn;
    524: rule.qpn = *qpn;
    544: mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, unsigned char *mac, int qpn, u64 reg_id)
    579: int *qpn = &priv->base_qpn;
    646: int qpn = priv->base_qpn;
    687: mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, unsigned char *new_mac, unsigned char *prev_mac)
    ... (more matches not shown)

en_rx.c
    981: static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
    994: err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
    996: en_err(priv, "Failed to allocate qp #%x\n", qpn);
    1003: qpn, ring->cqn, -1, context);
    1027: u32 qpn;
    1029: err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
    1031: en_err(priv, "Failed reserving drop qpn\n");
    1034: err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
    1037: mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
    1046: u32 qpn;
    1065: int i, qpn;
    ... (more matches not shown)

en_tx.c
    48: mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, int qpn, u32 size, u16 stride, int node, int queue_index)
    115: ring->qpn = qpn;
    116: err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
    118: en_err(priv, "Failed allocating qp %d\n", ring->qpn);
    167: en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
    198: ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
    201: mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,

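Line 198 is a small but deliberate optimization: the doorbell word (QPN shifted left by 8, stored big-endian) is computed once when the ring is activated, so the transmit hot path can write a ready-made value to the doorbell register. A sketch of that precomputation; the bits 8..31 placement is what the shift implies, not something verified against hardware documentation:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t qpn = 0x00ABCD;

        /* ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8):
         * the QPN occupies bits 8..31 of the doorbell word, and the
         * big-endian conversion happens once here, not per packet. */
        uint32_t doorbell_be = htonl(qpn << 8);
        const uint8_t *b = (const uint8_t *)&doorbell_be;

        printf("qpn 0x%06x -> doorbell bytes %02x %02x %02x %02x\n",
               qpn, b[0], b[1], b[2], b[3]);
        return 0;
    }
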
en_ethtool.c
    601: ring_index[n] = rss_map->qps[n % rss_rings].qpn -
    930: u32 qpn;
    946: qpn = priv->drop_qp.qpn;
    948: qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
    955: qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
    956: if (!qpn) {
    962: rule.qpn = qpn;
    ... (more matches not shown)

/drivers/infiniband/core/

agent.h
    49: int port_num, int qpn);

cm_msgs.h
    115: static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
    117: req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
    523: static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
    525: rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
    643: static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
    645: dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
    692: static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
    694: lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
    828: cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg, __be32 qpn)
    831: sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) <<
    ... (more matches not shown)

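Every helper in this header packs the 24-bit QPN into the upper 24 bits of a 32-bit big-endian header word, keeping the word's low 8 bits for unrelated fields; hence the recurring (be32_to_cpu(qpn) << 8) | ... shape. A userspace sketch of a matching set/get pair (the field and function names here are illustrative, not the header's own):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* One 32-bit CM header word: QPN in bits 8..31, other flags in bits 0..7. */

    static void set_local_qpn(uint32_t *offset32_be, uint32_t qpn_be)
    {
        uint32_t low8 = ntohl(*offset32_be) & 0xFF;   /* preserve the low bits */
        *offset32_be = htonl((ntohl(qpn_be) << 8) | low8);
    }

    static uint32_t get_local_qpn(uint32_t offset32_be)
    {
        return htonl(ntohl(offset32_be) >> 8);        /* back to a be32 QPN */
    }

    int main(void)
    {
        uint32_t word_be = htonl(0x000000AB);         /* pre-existing low bits */
        uint32_t qpn_be = htonl(0x123456);

        set_local_qpn(&word_be, qpn_be);
        printf("word = 0x%08x, qpn back = 0x%06x\n",
               ntohl(word_be), ntohl(get_local_qpn(word_be)));
        return 0;
    }
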
agent.c
    81: agent_send_response(struct ib_mad *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num, int qpn)
    101: agent = port_priv->agent[qpn];

user_mad.c
    231: packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
    522: be32_to_cpu(packet->mad.hdr.qpn),
    640: if (ureq.qpn != 0 && ureq.qpn != 1) {
    643: ureq.qpn);
    678: ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
    742: if (ureq.qpn != 0 && ureq.qpn != 1) {
    745: ureq.qpn);
    794: ureq.qpn
    ... (more matches not shown)

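The two identical checks at lines 640 and 742 enforce that a userspace MAD agent may only bind QPN 0 or 1, i.e. the well-known SMI (QP0) and GSI (QP1) management QPs, and line 678 shows the QPN then selecting the QP type. A sketch of that validation and selection (enum names are stand-ins for IB_QPT_SMI/IB_QPT_GSI):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    enum qp_type { QPT_SMI, QPT_GSI };

    /* Only the two well-known management QPs are valid registration targets. */
    static int check_mad_reg_qpn(uint32_t qpn, enum qp_type *type)
    {
        if (qpn != 0 && qpn != 1) {
            fprintf(stderr, "invalid QPN %u specified\n", qpn);
            return -EINVAL;
        }
        *type = qpn ? QPT_GSI : QPT_SMI;   /* mirrors: ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI */
        return 0;
    }

    int main(void)
    {
        enum qp_type t = QPT_SMI;
        for (uint32_t qpn = 0; qpn <= 2; qpn++)
            printf("qpn %u -> ret %d (type %d)\n", qpn,
                   check_mad_reg_qpn(qpn, &t), t);
        return 0;
    }
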
/drivers/infiniband/hw/qib/

qib_qp.c
    126: u32 i, offset, max_scan, qpn;
    144: qpn = qpt->last + 2;
    145: if (qpn >= QPN_MAX)
    146: qpn = 2;
    147: if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
    148: qpn = (qpn | qpt->mask) + 2;
    149: offset = qpn & BITS_PER_PAGE_MASK;
    150: map = &qpt->map[qpn / BITS_PER_PAGE];
    160: qpt->last = qpn;
    202: free_qpn(struct qib_qpn_table *qpt, u32 qpn)
    211: qpn_hash(struct qib_ibdev *dev, u32 qpn)
    338: qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
    ... (more matches not shown)

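This is a bitmap QPN allocator: scan forward from just past the last number handed out, wrap back to 2 (0 and 1 belong to the special QPs), and set the bit that claims the number. The sketch below models that scheme in a simplified single-table form, leaving out qib's extra qpt->mask step (lines 147-148) that steers QPNs onto receive queues, and all locking:

    #include <stdint.h>
    #include <stdio.h>

    #define QPN_MAX 4096                   /* sketch-sized; the driver uses many pages */
    #define BITS    (sizeof(unsigned long) * 8)

    struct qpn_table {
        unsigned long map[QPN_MAX / BITS]; /* one bit per QPN */
        uint32_t last;                     /* last QPN handed out */
    };

    static int qpn_test_and_set(struct qpn_table *t, uint32_t qpn)
    {
        unsigned long bit = 1UL << (qpn % BITS);
        if (t->map[qpn / BITS] & bit)
            return 1;                      /* already taken */
        t->map[qpn / BITS] |= bit;
        return 0;
    }

    /* Scan forward from last+1, wrapping to 2; 0 and 1 stay reserved for QP0/QP1. */
    static int alloc_qpn(struct qpn_table *t)
    {
        uint32_t qpn = t->last + 1;
        for (uint32_t tries = 0; tries < QPN_MAX; tries++, qpn++) {
            if (qpn >= QPN_MAX)
                qpn = 2;
            if (!qpn_test_and_set(t, qpn)) {
                t->last = qpn;
                return (int)qpn;
            }
        }
        return -1;                         /* table exhausted */
    }

    static void free_qpn(struct qpn_table *t, uint32_t qpn)
    {
        t->map[qpn / BITS] &= ~(1UL << (qpn % BITS));
    }

    int main(void)
    {
        struct qpn_table t = { .last = 1 };
        int a = alloc_qpn(&t), b = alloc_qpn(&t);

        printf("allocated %d and %d\n", a, b);
        free_qpn(&t, (uint32_t)a);
        /* The scan resumes past 'last', so a freed QPN is not reused at once. */
        printf("after freeing %d, next alloc: %d\n", a, alloc_qpn(&t));
        return 0;
    }
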
/drivers/infiniband/hw/mthca/

mthca_qp.c
    197: return qp->qpn >= dev->qp_table.sqp_start &&
    198: qp->qpn <= dev->qp_table.sqp_start + 3;
    203: return qp->qpn >= dev->qp_table.sqp_start &&
    204: qp->qpn <= dev->qp_table.sqp_start + 1;
    237: void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
    244: qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
    251: event_type, qpn);
    448: err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
    614: qp_context->local_qpn = cpu_to_be32(qp->qpn);
    755: ((qp->qpn
    1343: mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_sqp *sqp)
    ... (more matches not shown)

mthca_mad.c
    165: int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
    167: struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
    172: send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,

mthca_eq.c
    143: __be32 qpn;    /* member of an anonymous struct within struct mthca_eqe */
    282: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    287: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    292: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    297: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    307: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    312: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    317: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
    322: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,

mthca_provider.h
    264: u32 qpn;    /* member of struct mthca_qp */

/drivers/infiniband/hw/ipath/

ipath_qp.c
    107: u32 i, offset, max_scan, qpn;
    132: qpn = qpt->last + 1;
    133: if (qpn >= QPN_MAX)
    134: qpn = 2;
    135: offset = qpn & BITS_PER_PAGE_MASK;
    136: map = &qpt->map[qpn / BITS_PER_PAGE];
    148: qpt->last = qpn;
    149: ret = qpn;
    153: qpn = mk_qpn(qpt, map, offset);
    162: } while (offset < BITS_PER_PAGE && qpn < QPN_MAX
    190: free_qpn(struct ipath_qp_table *qpt, u32 qpn)
    301: ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
    ... (more matches not shown)

/drivers/infiniband/hw/mlx4/

qp.c
    135: return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
    136: qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
    147: qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
    148: qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
    154: if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
    155: qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
    172: qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
    173: qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
    179: if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
    328: "on QP %06x\n", type, qp->qpn);
    611: qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
    626: int qpn;
    1968: vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
    ... (more matches not shown)

mlx4_ib.h
    657: void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
    658: void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
    788: int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
    789: void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);

/drivers/infiniband/hw/amso1100/

c2_provider.h
    113: int qpn;    /* member of struct c2_qp */

c2_qp.c
    390: qp->qpn = ret;
    397: static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
    400: idr_remove(&c2dev->qp_table.idr, qpn);
    404: struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
    410: qp = idr_find(&c2dev->qp_table.idr, qpn);
    432: qp->ibqp.qp_num = qp->qpn;
    570: c2_free_qpn(c2dev, qp->qpn);
    613: c2_free_qpn(c2dev, qp->qpn);

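amso1100 takes the opposite approach from the bitmap allocators above: it lets an IDR pick the QPN, so the allocator's returned id simply becomes qp->qpn (line 390) and lookup is idr_find() keyed by it (line 410). A userspace sketch of that id-to-pointer pattern, with a plain array standing in for the kernel IDR and no locking:

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_QPS 64                     /* sketch-sized id space */

    struct c2_qp_sketch { int qpn; };

    static struct c2_qp_sketch *qp_idr[MAX_QPS];

    /* Allocate the lowest free id and bind it to qp, like idr_alloc(). */
    static int alloc_qpn_id(struct c2_qp_sketch *qp)
    {
        for (int id = 0; id < MAX_QPS; id++) {
            if (!qp_idr[id]) {
                qp_idr[id] = qp;
                qp->qpn = id;              /* the id *is* the QPN */
                return id;
            }
        }
        return -1;
    }

    static struct c2_qp_sketch *find_qpn(int qpn)   /* idr_find() stand-in */
    {
        return (qpn >= 0 && qpn < MAX_QPS) ? qp_idr[qpn] : NULL;
    }

    static void free_qpn_id(int qpn)                /* idr_remove() stand-in */
    {
        if (qpn >= 0 && qpn < MAX_QPS)
            qp_idr[qpn] = NULL;
    }

    int main(void)
    {
        struct c2_qp_sketch qp;
        int id = alloc_qpn_id(&qp);

        printf("qpn %d, lookup %p\n", id, (void *)find_qpn(id));
        free_qpn_id(id);
        printf("after free: %p\n", (void *)find_qpn(id));
        return 0;
    }
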
c2_cm.c
    54: ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
    301: ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
