Lines Matching refs:qp

120 void c2_set_qp_state(struct c2_qp *qp, int c2_state)
124 pr_debug("%s: qp[%p] state modify %s --> %s\n",
126 qp,
127 to_ib_state_str(qp->state),
129 qp->state = new_state;
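The matches above are essentially the whole of c2_set_qp_state(): translate the adapter's state code, log the transition, record it. A minimal sketch of that shape, assuming a to_ib_state() helper that maps the c2 state code to the IB enum:

void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
        int new_state = to_ib_state(c2_state);  /* assumed mapping helper */

        pr_debug("%s: qp[%p] state modify %s --> %s\n",
                 __func__, qp,
                 to_ib_state_str(qp->state),
                 to_ib_state_str(new_state));
        /* no locking here: callers are expected to serialize state changes */
        qp->state = new_state;
}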
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
144 pr_debug("%s:%d qp=%p, %s --> %s\n",
146 qp,
147 to_ib_state_str(qp->state),
157 wr.qp_handle = qp->adapter_handle;
173 spin_lock_irqsave(&qp->lock, flags);
174 if (qp->cm_id && qp->state == IB_QPS_RTS) {
176 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
178 vq_req->cm_id = qp->cm_id;
181 spin_unlock_irqrestore(&qp->lock, flags);
225 qp->state = next_state;
235 spin_lock_irqsave(&qp->lock, flags);
236 if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
237 qp->cm_id->rem_ref(qp->cm_id);
238 qp->cm_id = NULL;
240 spin_unlock_irqrestore(&qp->lock, flags);
246 pr_debug("%s:%d qp=%p, cur_state=%s\n",
248 qp,
249 to_ib_state_str(qp->state));
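c2_qp_modify() takes qp->lock twice: once before posting the verbs WR, to stash the cm_id of a connected (RTS) QP in the vq_req so the reply path can signal the connection, and once after the reply, to drop that cm_id when the adapter reported IW_CM_EVENT_CLOSE. The second critical section, reassembled from the matches as a sketch:

spin_lock_irqsave(&qp->lock, flags);
if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
        qp->cm_id->rem_ref(qp->cm_id);  /* drop the CM's reference */
        qp->cm_id = NULL;               /* QP is no longer connected */
}
spin_unlock_irqrestore(&qp->lock, flags);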
253 int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
268 wr.qp_handle = qp->adapter_handle;
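c2_qp_set_read_limits() follows the same verbs-request shape as the modify path: build a WR, stamp it with the adapter-side handle, post it, and wait for the reply. A sketch of the stamp-and-post step; vq_send_wr() as the posting helper is an assumption based on the modify path:

wr.qp_handle = qp->adapter_handle;              /* adapter's name for this QP */
err = vq_send_wr(c2dev, (union c2wr *) &wr);    /* assumed verbs-queue post */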
302 static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
324 wr.qp_handle = qp->adapter_handle;
331 spin_lock_irqsave(&qp->lock, flags);
332 if (qp->cm_id && qp->state == IB_QPS_RTS) {
334 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
336 vq_req->qp = qp;
337 vq_req->cm_id = qp->cm_id;
340 spin_unlock_irqrestore(&qp->lock, flags);
368 spin_lock_irqsave(&qp->lock, flags);
369 if (qp->cm_id) {
370 qp->cm_id->rem_ref(qp->cm_id);
371 qp->cm_id = NULL;
373 spin_unlock_irqrestore(&qp->lock, flags);
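destroy_qp() mirrors the modify path's connection handling: before posting the destroy WR it records the QP and its cm_id on the vq_req under qp->lock, so that a CLOSE triggered by the destroy can be delivered; after the reply it drops the cm_id reference unconditionally. The first critical section, reassembled as a sketch:

spin_lock_irqsave(&qp->lock, flags);
if (qp->cm_id && qp->state == IB_QPS_RTS) {
        /* destroying a live connection: let the reply path fire CLOSE */
        vq_req->qp = qp;
        vq_req->cm_id = qp->cm_id;
}
spin_unlock_irqrestore(&qp->lock, flags);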
381 static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
388 ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
390 qp->qpn = ret;
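QPN allocation is an idr_alloc_cyclic(), so numbers are not immediately reused after a free. A self-contained sketch of the idiom; the qp_table lock and the preload bracket are assumptions:

#include <linux/idr.h>
#include <linux/spinlock.h>

static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
        int ret;

        idr_preload(GFP_KERNEL);                /* pre-allocate outside the lock */
        spin_lock_irq(&c2dev->qp_table.lock);   /* assumed table lock */
        ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                qp->qpn = ret;
        spin_unlock_irq(&c2dev->qp_table.lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}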
407 struct c2_qp *qp;
410 qp = idr_find(&c2dev->qp_table.idr, qpn);
412 return qp;
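The reverse lookup at 407-412 is a bare idr_find(). Sketched with the function name and locking assumed:

struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)  /* name assumed */
{
        struct c2_qp *qp;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->qp_table.lock, flags);  /* assumed table lock */
        qp = idr_find(&c2dev->qp_table.idr, qpn);
        spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
        return qp;
}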
417 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
429 err = c2_alloc_qpn(c2dev, qp);
432 qp->ibqp.qp_num = qp->qpn;
433 qp->ibqp.qp_type = IB_QPT_RC;
436 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
437 &qp->sq_mq.shared_dma, GFP_KERNEL);
438 if (!qp->sq_mq.shared) {
443 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
444 &qp->rq_mq.shared_dma, GFP_KERNEL);
445 if (!qp->rq_mq.shared) {
472 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
473 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
477 wr.user_context = (unsigned long) qp;
506 atomic_set(&qp->refcount, 1);
507 qp->adapter_handle = reply->qp_handle;
508 qp->state = IB_QPS_RESET;
509 qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
510 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
511 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
512 init_waitqueue_head(&qp->wait);
525 c2_mq_req_init(&qp->sq_mq,
544 c2_mq_req_init(&qp->rq_mq,
558 iounmap(qp->sq_mq.peer);
560 destroy_qp(c2dev, qp);
566 c2_free_mqsp(qp->rq_mq.shared);
568 c2_free_mqsp(qp->sq_mq.shared);
570 c2_free_qpn(c2dev, qp->qpn);
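The tail of the create path (matches at 558-570) is the error unwind, undoing each setup stage in reverse: unmap the adapter-side SQ MQ, destroy the QP in the RNIC, free both shared MQ pointers, release the QPN. As a goto ladder, with the label names assumed:

bail5:
        iounmap(qp->sq_mq.peer);        /* undo the SQ MQ mapping */
bail4:
        destroy_qp(c2dev, qp);          /* undo the create-QP WR */
bail3:
        c2_free_mqsp(qp->rq_mq.shared); /* undo the RQ shared-pointer alloc */
bail2:
        c2_free_mqsp(qp->sq_mq.shared); /* undo the SQ shared-pointer alloc */
bail1:
        c2_free_qpn(c2dev, qp->qpn);    /* return the QPN to the IDR */
        return err;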
600 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
605 send_cq = to_c2cq(qp->ibqp.send_cq);
606 recv_cq = to_c2cq(qp->ibqp.recv_cq);
613 c2_free_qpn(c2dev, qp->qpn);
617 * Destroy qp in the rnic...
619 destroy_qp(c2dev, qp);
624 c2_cq_clean(c2dev, qp, send_cq->cqn);
626 c2_cq_clean(c2dev, qp, recv_cq->cqn);
631 iounmap(qp->sq_mq.peer);
632 iounmap(qp->rq_mq.peer);
633 c2_free_mqsp(qp->sq_mq.shared);
634 c2_free_mqsp(qp->rq_mq.shared);
636 atomic_dec(&qp->refcount);
637 wait_event(qp->wait, !atomic_read(&qp->refcount));
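c2_free_qp() frees the QPN first so no new lookup can find the QP, destroys it in the RNIC, scrubs stale entries out of both CQs, releases the MQ mappings and shared pointers, and only then drops its own reference and sleeps until every other holder is gone. The drop-and-wait pair from the matches, plus the wake-up that the last reference holder must perform (the helper name is hypothetical):

/* in c2_free_qp(): drop our reference, wait for the count to reach zero */
atomic_dec(&qp->refcount);
wait_event(qp->wait, !atomic_read(&qp->refcount));

/* hypothetical deref helper: the last dropper wakes the waiter */
static void c2_qp_put(struct c2_qp *qp)
{
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}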
754 * qp - ptr to user qp
762 static int qp_wr_post(struct c2_mq *q, union c2wr *wr, struct c2_qp *qp, u32 size)
794 struct c2_qp *qp = to_c2qp(ibqp);
804 if (qp->state > IB_QPS_RTS) {
837 if (ib_wr->num_sge > qp->send_sgl_depth) {
855 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
922 spin_lock_irqsave(&qp->lock, lock_flags);
923 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
925 spin_unlock_irqrestore(&qp->lock, lock_flags);
932 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
933 spin_unlock_irqrestore(&qp->lock, lock_flags);
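c2_post_send() is gated twice before anything touches the MQ: the QP must not be past RTS, and each WR's num_sge must fit the depth recorded at create time (send_sgl_depth, or rdma_write_sgl_depth for RDMA writes). The post itself happens under qp->lock, ringing the adapter doorbell via c2_activity() before unlocking; reassembled from the matches:

spin_lock_irqsave(&qp->lock, lock_flags);
err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
if (err) {
        spin_unlock_irqrestore(&qp->lock, lock_flags);
        break;                  /* inside the WR loop: stop at first failure */
}
c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); /* ring doorbell */
spin_unlock_irqrestore(&qp->lock, lock_flags);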
948 struct c2_qp *qp = to_c2qp(ibqp);
953 if (qp->state > IB_QPS_RTS) {
965 if (ib_wr->num_sge > qp->recv_sgl_depth) {
993 spin_lock_irqsave(&qp->lock, lock_flags);
994 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
996 spin_unlock_irqrestore(&qp->lock, lock_flags);
1003 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
1004 spin_unlock_irqrestore(&qp->lock, lock_flags);
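c2_post_receive() repeats the pattern with two differences visible in the matches: the depth check is against recv_sgl_depth, and the posted size is the RQ's fixed rq_mq.msg_size rather than a per-opcode computation. The entry guards as a sketch (the driver may record the error and break out of its WR loop rather than returning directly):

if (qp->state > IB_QPS_RTS)                     /* no posting once past RTS */
        return -EINVAL;
if (ib_wr->num_sge > qp->recv_sgl_depth)        /* SGL deeper than QP allows */
        return -EINVAL;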