Searched refs:attr_mask (Results 1 - 25 of 35) sorted by relevance

/drivers/infiniband/hw/ipath/
ipath_qp.c
445 * @attr_mask: the mask of attributes to modify
451 int attr_mask, struct ib_udata *udata)
461 cur_state = attr_mask & IB_QP_CUR_STATE ?
463 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
466 attr_mask, IB_LINK_LAYER_UNSPECIFIED))
469 if (attr_mask & IB_QP_AV) {
479 if (attr_mask & IB_QP_PKEY_INDEX)
483 if (attr_mask & IB_QP_MIN_RNR_TIMER)
487 if (attr_mask & IB_QP_PORT)
496 if ((attr_mask
450 ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
621 ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) argument
[all...]
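
Every hardware driver in these results implements the same skeleton around attr_mask: derive the current and target states from the IB_QP_CUR_STATE and IB_QP_STATE bits, validate the transition with ib_modify_qp_is_ok(), then copy in only the attributes whose bits the caller set. Below is a minimal sketch of that skeleton, assuming the ib_verbs.h definitions of this kernel generation (ib_modify_qp_is_ok() still takes the link-layer argument seen in the calls above); struct foo_qp and foo_modify_qp() are hypothetical stand-ins for the per-driver types such as ipath_qp/ipath_modify_qp, not code from any of these files.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-driver QP state; stands in for ipath_qp, qib_qp, ... */
struct foo_qp {
        struct ib_qp ibqp;
        enum ib_qp_state state;
        u16 pkey_index;
        u8 port_num;
        u8 min_rnr_timer;
};

static int foo_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        struct foo_qp *qp = container_of(ibqp, struct foo_qp, ibqp);
        enum ib_qp_state cur_state, new_state;

        /* IB_QP_CUR_STATE lets the caller assert the state it believes in. */
        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        /* Reject masks that are not legal for this transition and QP type. */
        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, IB_LINK_LAYER_UNSPECIFIED))
                return -EINVAL;

        /* Apply only the attributes the caller actually selected. */
        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;
        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->min_rnr_timer = attr->min_rnr_timer;

        qp->state = new_state;
        return 0;
}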
ipath_srq.c
212 * @attr_mask: indicates which attributes to modify
216 enum ib_srq_attr_mask attr_mask,
223 if (attr_mask & IB_SRQ_MAX_WR) {
230 ((attr_mask & IB_SRQ_LIMIT) ?
303 if (attr_mask & IB_SRQ_LIMIT)
333 } else if (attr_mask & IB_SRQ_LIMIT) {
215 ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
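
The SRQ entry points take an explicit enum ib_srq_attr_mask instead of the QP bit-field: IB_SRQ_MAX_WR requests a resize of the shared receive queue and IB_SRQ_LIMIT (re)arms the low-watermark limit event. A rough sketch of how a driver dispatches on those two bits follows; struct foo_srq and foo_modify_srq() are hypothetical, and the comments only gesture at the queue reallocation and hardware arming the real drivers do here.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct foo_srq {                        /* hypothetical driver SRQ state */
        struct ib_srq ibsrq;
        u32 max_wr;
        u32 limit;
};

static int foo_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                          enum ib_srq_attr_mask attr_mask,
                          struct ib_udata *udata)
{
        struct foo_srq *srq = container_of(ibsrq, struct foo_srq, ibsrq);

        if (attr_mask & IB_SRQ_MAX_WR) {
                /* Resize request: the real drivers reallocate the receive
                 * queue and copy pending WRs across before updating the
                 * bookkeeping. */
                srq->max_wr = attr->max_wr;
        }

        if (attr_mask & IB_SRQ_LIMIT) {
                /* Arm the limit event: the provider raises an asynchronous
                 * event once fewer than srq_limit WRs remain posted. */
                srq->limit = attr->srq_limit;
        }

        return 0;
}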
/drivers/infiniband/hw/qib/
qib_qp.c
563 * @attr_mask: the mask of attributes to modify
569 int attr_mask, struct ib_udata *udata)
583 cur_state = attr_mask & IB_QP_CUR_STATE ?
585 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
588 attr_mask, IB_LINK_LAYER_UNSPECIFIED))
591 if (attr_mask & IB_QP_AV) {
598 if (attr_mask & IB_QP_ALT_PATH) {
607 if (attr_mask & IB_QP_PKEY_INDEX)
611 if (attr_mask & IB_QP_MIN_RNR_TIMER)
615 if (attr_mask
568 qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
854 qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) argument
[all...]
qib_srq.c
208 * @attr_mask: indicates which attributes to modify
212 enum ib_srq_attr_mask attr_mask,
219 if (attr_mask & IB_SRQ_MAX_WR) {
226 ((attr_mask & IB_SRQ_LIMIT) ?
299 if (attr_mask & IB_SRQ_LIMIT)
333 } else if (attr_mask & IB_SRQ_LIMIT) {
211 qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
82 int attr_mask; local
91 attr_mask =
96 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
104 attr_mask &= ~IB_QP_PORT;
105 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
113 attr_mask |= IB_QP_SQ_PSN;
114 attr_mask &= ~IB_QP_PKEY_INDEX;
115 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
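
ipoib_verbs.c is the lone consumer in this result set: it builds attr_mask itself and passes it to ib_modify_qp() while walking its UD QP through INIT, RTR and RTS, clearing or adding bits per transition exactly as the matches above show. A condensed sketch of that caller-side pattern, with the port, pkey index and qkey passed as plain parameters instead of being read from IPoIB's private structure:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Bring a UD QP to RTS, masking in only what each transition may carry. */
static int ud_qp_to_rts(struct ib_qp *qp, u8 port, u16 pkey_index, u32 qkey)
{
        struct ib_qp_attr qp_attr;
        int attr_mask;
        int ret;

        /* RESET -> INIT needs pkey index, port, qkey and the state itself. */
        memset(&qp_attr, 0, sizeof(qp_attr));
        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.pkey_index = pkey_index;
        qp_attr.port_num = port;
        qp_attr.qkey = qkey;
        attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY;
        ret = ib_modify_qp(qp, &qp_attr, attr_mask);
        if (ret)
                return ret;

        /* INIT -> RTR: the port cannot be set again, so drop that bit. */
        qp_attr.qp_state = IB_QPS_RTR;
        attr_mask &= ~IB_QP_PORT;
        ret = ib_modify_qp(qp, &qp_attr, attr_mask);
        if (ret)
                return ret;

        /* RTR -> RTS: add the initial send PSN, drop the pkey index. */
        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        attr_mask |= IB_QP_SQ_PSN;
        attr_mask &= ~IB_QP_PKEY_INDEX;
        return ib_modify_qp(qp, &qp_attr, attr_mask);
}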
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.h
68 int attr_mask);
70 int attr_mask, struct ib_udata *udata);
ocrdma_hw.h
115 struct ib_qp_attr *attrs, int attr_mask);
ocrdma_hw.c
2277 int attr_mask)
2317 if (attr_mask & IB_QP_VID) {
2330 struct ib_qp_attr *attrs, int attr_mask)
2334 if (attr_mask & IB_QP_PKEY_INDEX) {
2339 if (attr_mask & IB_QP_QKEY) {
2344 if (attr_mask & IB_QP_AV) {
2345 status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
2357 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2363 if (attr_mask & IB_QP_DEST_QPN) {
2368 if (attr_mask
2274 ocrdma_set_av_params(struct ocrdma_qp *qp, struct ocrdma_modify_qp *cmd, struct ib_qp_attr *attrs, int attr_mask) argument
2328 ocrdma_set_qp_params(struct ocrdma_qp *qp, struct ocrdma_modify_qp *cmd, struct ib_qp_attr *attrs, int attr_mask) argument
2434 ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, struct ib_qp_attr *attrs, int attr_mask) argument
[all...]
ocrdma_main.c
563 int attr_mask = IB_QP_STATE; local
573 _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
/drivers/infiniband/hw/mthca/
mthca_qp.c
299 int attr_mask)
301 if (attr_mask & IB_QP_PKEY_INDEX)
303 if (attr_mask & IB_QP_QKEY)
305 if (attr_mask & IB_QP_SQ_PSN)
328 int attr_mask)
334 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
339 if (attr_mask & IB_QP_ACCESS_FLAGS)
544 const struct ib_qp_attr *attr, int attr_mask,
567 if (!(attr_mask & IB_QP_PATH_MIG_STATE))
588 else if (attr_mask
298 store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) argument
327 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
543 __mthca_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) argument
796 store_attrs(to_msqp(qp), attr, attr_mask); local
842 mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
[all...]
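
mthca's get_hw_access_flags() here, like to_mlx4_access_flags() and to_mlx5_access_flags() further down, shows the other recurring idiom: attr_mask decides whether the remote-access bits come from the new attributes or from values cached in the driver's QP, and a zero max_dest_rd_atomic strips remote reads and atomics. A sketch of that logic with made-up hardware bits; HW_BIT_RWE/RRE/RAE and struct foo_qp are hypothetical placeholders, not the actual mthca or mlx4 macros.

#include <rdma/ib_verbs.h>

/* Hypothetical hardware context bits for remote write/read/atomic. */
#define HW_BIT_RWE (1 << 0)
#define HW_BIT_RRE (1 << 1)
#define HW_BIT_RAE (1 << 2)

struct foo_qp {                 /* hypothetical cached QP attributes */
        int atomic_rd_en;       /* last IB access flags accepted      */
        int resp_depth;         /* last max_dest_rd_atomic accepted   */
};

static u32 to_hw_access_flags(struct foo_qp *qp, const struct ib_qp_attr *attr,
                              int attr_mask)
{
        int access;
        int dest_rd_atomic;
        u32 hw = 0;

        /* Pick new values where the caller masked them in, else the cache. */
        access = (attr_mask & IB_QP_ACCESS_FLAGS) ?
                attr->qp_access_flags : qp->atomic_rd_en;
        dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
                attr->max_dest_rd_atomic : qp->resp_depth;

        /* No responder resources means no remote reads or atomics. */
        if (!dest_rd_atomic)
                access &= IB_ACCESS_REMOTE_WRITE;

        if (access & IB_ACCESS_REMOTE_WRITE)
                hw |= HW_BIT_RWE;
        if (access & IB_ACCESS_REMOTE_READ)
                hw |= HW_BIT_RRE;
        if (access & IB_ACCESS_REMOTE_ATOMIC)
                hw |= HW_BIT_RAE;

        return hw;
}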
mthca_srq.c
365 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
372 if (attr_mask & IB_SRQ_MAX_WR)
375 if (attr_mask & IB_SRQ_LIMIT) {
364 mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
mthca_dev.h
516 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
531 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
/drivers/infiniband/hw/ehca/
ehca_qp.c
1239 int attr_mask, int smi_reset2init)
1293 (attr_mask & IB_QP_STATE) &&
1315 if ((attr_mask & IB_QP_CUR_STATE) &&
1327 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
1329 qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
1332 attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
1336 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
1337 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
1428 !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
1434 if (attr_mask
1237 internal_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, int smi_reset2init) argument
1778 ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
2032 ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
[all...]
ehca_iverbs.h
149 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
170 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
/drivers/infiniband/hw/mlx4/
qp.c
1197 int attr_mask)
1203 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1208 if (attr_mask & IB_QP_ACCESS_FLAGS)
1227 int attr_mask)
1229 if (attr_mask & IB_QP_PKEY_INDEX)
1231 if (attr_mask & IB_QP_QKEY)
1233 if (attr_mask & IB_QP_SQ_PSN)
1416 const struct ib_qp_attr *attr, int attr_mask,
1430 if (attr_mask & IB_QP_ALT_PATH &&
1442 if (!(attr_mask
1196 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1226 store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) argument
1415 __mlx4_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) argument
1750 store_sqp_attrs(to_msqp(qp), attr, attr_mask); local
1883 mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
[all...]
srq.c
229 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
236 if (attr_mask & IB_SRQ_MAX_WR)
239 if (attr_mask & IB_SRQ_LIMIT) {
228 mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
mlx4_ib.h
668 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
680 int attr_mask, struct ib_udata *udata);
/drivers/infiniband/hw/usnic/
usnic_ib_verbs.h
46 int attr_mask, struct ib_udata *udata);
usnic_ib_verbs.c
547 int attr_mask, struct ib_udata *udata)
557 if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
559 } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
561 } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
565 attr_mask & IB_QP_STATE, attr->qp_state);
546 usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
/drivers/infiniband/hw/mlx5/
qp.c
1248 int attr_mask)
1254 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1259 if (attr_mask & IB_QP_ACCESS_FLAGS)
1303 struct mlx5_qp_path *path, u8 port, int attr_mask,
1313 if (attr_mask & IB_QP_PKEY_INDEX)
1340 if (attr_mask & IB_QP_TIMEOUT)
1492 const struct ib_qp_attr *attr, int attr_mask,
1520 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1541 } else if (attr_mask & IB_QP_PATH_MTU) {
1551 if (attr_mask
1247 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1302 mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, u32 path_flags, const struct ib_qp_attr *attr) argument
1491 __mlx5_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) argument
1688 mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
[all...]
srq.c
347 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
354 if (attr_mask & IB_SRQ_MAX_WR)
357 if (attr_mask & IB_SRQ_LIMIT) {
346 mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) argument
mlx5_ib.h
475 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
484 int attr_mask, struct ib_udata *udata);
/drivers/infiniband/hw/cxgb4/
qp.c
1814 int attr_mask, struct ib_udata *udata)
1824 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1825 attr_mask &= ~IB_QP_STATE;
1828 if (!attr_mask)
1843 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1844 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1856 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
1857 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
1872 int attr_mask, struct ib_qp_init_attr *init_attr)
1813 c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
1871 c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) argument
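
The cxgb4 provider here (and cxgb3 just below) never acts on attr_mask directly; after stripping a pure RTR request, which the iWARP hardware performs implicitly, it translates the generic bits into a driver-private mask. A trimmed sketch of that translation step; the FOO_QP_ATTR_* values are hypothetical placeholders for the C4IW_QP_ATTR_*/IWCH_QP_ATTR_* bits visible in the matches, and the rest of the real function is omitted.

#include <rdma/ib_verbs.h>

/* Hypothetical driver-private attribute bits, standing in for the
 * C4IW_QP_ATTR_* / IWCH_QP_ATTR_* values seen in the matches above. */
enum {
        FOO_QP_ATTR_NEXT_STATE   = 1 << 0,
        FOO_QP_ATTR_ACCESS_FLAGS = 1 << 1,
        FOO_QP_ATTR_SQ_DB        = 1 << 2,
        FOO_QP_ATTR_RQ_DB        = 1 << 3,
};

/* Translate the generic verbs attr_mask into the private mask the
 * firmware interface understands.  Returns 0 if nothing needs doing. */
static u32 foo_translate_qp_mask(int attr_mask, enum ib_qp_state qp_state)
{
        u32 mask = 0;

        /* iWARP hardware enters RTR on its own: drop a pure RTR request. */
        if ((attr_mask & IB_QP_STATE) && qp_state == IB_QPS_RTR)
                attr_mask &= ~IB_QP_STATE;

        mask |= (attr_mask & IB_QP_STATE) ? FOO_QP_ATTR_NEXT_STATE : 0;
        mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? FOO_QP_ATTR_ACCESS_FLAGS : 0;
        mask |= (attr_mask & IB_QP_SQ_PSN) ? FOO_QP_ATTR_SQ_DB : 0;
        mask |= (attr_mask & IB_QP_RQ_PSN) ? FOO_QP_ATTR_RQ_DB : 0;

        return mask;
}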
/drivers/infiniband/hw/cxgb3/
iwch_provider.c
1047 int attr_mask, struct ib_udata *udata)
1057 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1058 attr_mask &= ~IB_QP_STATE;
1061 if (!attr_mask)
1076 mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
1077 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1046 iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
/drivers/infiniband/hw/amso1100/
c2_qp.c
135 struct ib_qp_attr *attr, int attr_mask)
163 if (attr_mask & IB_QP_STATE) {
185 } else if (attr_mask & IB_QP_CUR_STATE) {
134 c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, struct ib_qp_attr *attr, int attr_mask) argument
