Lines Matching refs:qp

203  * @qp: the QP
209 static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
218 qp->ibqp.qp_num = ret;
224 qp->next = qpt->table[ret];
225 qpt->table[ret] = qp;
226 atomic_inc(&qp->refcount);
238 * @qp: the QP to remove
243 static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
251 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
253 if (q == qp) {
254 *qpp = qp->next;
255 qp->next = NULL;
256 atomic_dec(&qp->refcount);
274 struct ipath_qp *qp;
279 qp = qpt->table[n];
282 for (; qp; qp = qp->next)
304 struct ipath_qp *qp;
308 for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
309 if (qp->ibqp.qp_num == qpn) {
310 atomic_inc(&qp->refcount);
316 return qp;
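
The three groups above (ipath_alloc_qpn, ipath_free_qp and the lookup around line 304) are a textbook open-chained hash table with per-entry reference counts: insertion links the QP at the head of its bucket and takes a reference (lines 224-226), removal walks the bucket with a pointer-to-pointer so the head needs no special case (lines 251-256), and lookup returns the QP with an extra reference held (lines 308-316). A minimal user-space sketch of the same pattern follows; the struct layout, the names, and the use of <stdatomic.h> in place of the kernel's atomic_t are assumptions for illustration, not the driver's definitions, and locking is left to the caller as in the original.

    #include <stdatomic.h>
    #include <stddef.h>

    struct qp {
        unsigned int qpn;        /* stands in for qp->ibqp.qp_num */
        atomic_int   refcount;
        struct qp   *next;       /* bucket chain, like qp->next   */
    };

    struct qp_table {
        unsigned int max;        /* number of buckets             */
        struct qp  **table;
    };

    /* Insert at the head of the bucket and take the table's reference. */
    static void qpt_insert(struct qp_table *qpt, struct qp *qp)
    {
        unsigned int b = qp->qpn % qpt->max;

        qp->next = qpt->table[b];
        qpt->table[b] = qp;
        atomic_fetch_add(&qp->refcount, 1);
    }

    /* Unlink with a pointer-to-pointer walk: removing the bucket head and
     * removing a middle element are the same code. */
    static void qpt_remove(struct qp_table *qpt, struct qp *qp)
    {
        struct qp **qpp = &qpt->table[qp->qpn % qpt->max];

        for (; *qpp; qpp = &(*qpp)->next) {
            if (*qpp == qp) {
                *qpp = qp->next;
                qp->next = NULL;
                atomic_fetch_sub(&qp->refcount, 1);
                break;
            }
        }
    }

    /* Look up a QP by number and return it with a reference held, which
     * the caller must eventually drop. */
    static struct qp *qpt_lookup(struct qp_table *qpt, unsigned int qpn)
    {
        struct qp *qp;

        for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
            if (qp->qpn == qpn) {
                atomic_fetch_add(&qp->refcount, 1);
                return qp;
            }
        }
        return NULL;
    }

The reference taken by the table (and by every lookup) is what lets the destroy path further down sleep until the QP is truly unused before freeing it; see the lines from 964 onward.
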
321 * @qp: the QP to reset
324 static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
326 qp->remote_qpn = 0;
327 qp->qkey = 0;
328 qp->qp_access_flags = 0;
329 atomic_set(&qp->s_dma_busy, 0);
330 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
331 qp->s_hdrwords = 0;
332 qp->s_wqe = NULL;
333 qp->s_pkt_delay = 0;
334 qp->s_draining = 0;
335 qp->s_psn = 0;
336 qp->r_psn = 0;
337 qp->r_msn = 0;
339 qp->s_state = IB_OPCODE_RC_SEND_LAST;
340 qp->r_state = IB_OPCODE_RC_SEND_LAST;
342 qp->s_state = IB_OPCODE_UC_SEND_LAST;
343 qp->r_state = IB_OPCODE_UC_SEND_LAST;
345 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
346 qp->r_nak_state = 0;
347 qp->r_aflags = 0;
348 qp->r_flags = 0;
349 qp->s_rnr_timeout = 0;
350 qp->s_head = 0;
351 qp->s_tail = 0;
352 qp->s_cur = 0;
353 qp->s_last = 0;
354 qp->s_ssn = 1;
355 qp->s_lsn = 0;
356 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
357 qp->r_head_ack_queue = 0;
358 qp->s_tail_ack_queue = 0;
359 qp->s_num_rd_atomic = 0;
360 if (qp->r_rq.wq) {
361 qp->r_rq.wq->head = 0;
362 qp->r_rq.wq->tail = 0;
368 * @qp: the QP to put into the error state
377 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
379 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
383 if (qp->state == IB_QPS_ERR)
386 qp->state = IB_QPS_ERR;
389 if (!list_empty(&qp->timerwait))
390 list_del_init(&qp->timerwait);
391 if (!list_empty(&qp->piowait))
392 list_del_init(&qp->piowait);
396 if (qp->s_last != qp->s_head)
397 ipath_schedule_send(qp);
400 wc.qp = &qp->ibqp;
403 if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
404 wc.wr_id = qp->r_wr_id;
406 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
410 if (qp->r_rq.wq) {
415 spin_lock(&qp->r_rq.lock);
418 wq = qp->r_rq.wq;
420 if (head >= qp->r_rq.size)
423 if (tail >= qp->r_rq.size)
426 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
427 if (++tail >= qp->r_rq.size)
429 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
433 spin_unlock(&qp->r_rq.lock);
434 } else if (qp->ibqp.event_handler)
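
Lines 410-433 are the receive-side flush: when a QP that owns its receive queue (no SRQ) enters the error state, every receive WQE still sitting between tail and head in the shared ring is completed with IB_WC_WR_FLUSH_ERR. A simplified, self-contained sketch of that ring drain; the type names and the flush_one callback are invented, and the real driver computes each entry's address with get_rwqe_ptr() because entries carry a variable-length SGE array.

    struct rwqe    { unsigned long wr_id; };
    struct rwq_hdr { unsigned int head, tail; };   /* shared with user space */

    struct recv_q {
        struct rwq_hdr *wq;
        struct rwqe    *entries;
        unsigned int    size;
    };

    /* Complete every posted-but-unconsumed receive WQE with a flush status.
     * head and tail live in memory user space can also write, so both are
     * sanity-checked before use, mirroring lines 418-429 above. */
    static void drain_recv_queue(struct recv_q *rq,
                                 void (*flush_one)(unsigned long wr_id))
    {
        unsigned int head = rq->wq->head;
        unsigned int tail = rq->wq->tail;

        if (head >= rq->size)
            head = 0;
        if (tail >= rq->size)
            tail = 0;

        while (tail != head) {
            flush_one(rq->entries[tail].wr_id);   /* post IB_WC_WR_FLUSH_ERR */
            if (++tail >= rq->size)
                tail = 0;
        }
        rq->wq->tail = tail;
    }
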
454 struct ipath_qp *qp = to_iqp(ibqp);
459 spin_lock_irq(&qp->s_lock);
462 attr->cur_qp_state : qp->state;
512 if (qp->state != IB_QPS_RESET) {
513 qp->state = IB_QPS_RESET;
515 if (!list_empty(&qp->timerwait))
516 list_del_init(&qp->timerwait);
517 if (!list_empty(&qp->piowait))
518 list_del_init(&qp->piowait);
520 qp->s_flags &= ~IPATH_S_ANY_WAIT;
521 spin_unlock_irq(&qp->s_lock);
523 tasklet_kill(&qp->s_task);
524 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
525 spin_lock_irq(&qp->s_lock);
527 ipath_reset_qp(qp, ibqp->qp_type);
531 qp->s_draining = qp->s_last != qp->s_cur;
532 qp->state = new_state;
536 if (qp->ibqp.qp_type == IB_QPT_RC)
538 qp->state = new_state;
542 lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
546 qp->state = new_state;
551 qp->s_pkey_index = attr->pkey_index;
554 qp->remote_qpn = attr->dest_qp_num;
557 qp->s_psn = qp->s_next_psn = attr->sq_psn;
558 qp->s_last_psn = qp->s_next_psn - 1;
562 qp->r_psn = attr->rq_psn;
565 qp->qp_access_flags = attr->qp_access_flags;
568 qp->remote_ah_attr = attr->ah_attr;
569 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
573 qp->path_mtu = attr->path_mtu;
576 qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
579 qp->s_rnr_retry = attr->rnr_retry;
580 if (qp->s_rnr_retry > 7)
581 qp->s_rnr_retry = 7;
582 qp->s_rnr_retry_cnt = qp->s_rnr_retry;
586 qp->r_min_rnr_timer = attr->min_rnr_timer;
589 qp->timeout = attr->timeout;
592 qp->qkey = attr->qkey;
595 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
598 qp->s_max_rd_atomic = attr->max_rd_atomic;
600 spin_unlock_irq(&qp->s_lock);
605 ev.device = qp->ibqp.device;
606 ev.element.qp = &qp->ibqp;
608 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
614 spin_unlock_irq(&qp->s_lock);
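
Lines 512-546 are the core of ipath_modify_qp: it switches on the requested state and only afterwards applies the individual attributes (pkey index, PSNs, access flags, path MTU, retry counts and so on, lines 551-598). The shape of that switch, pieced together from the matched lines and reduced to a compilable stand-alone model; every type and helper below is a stand-in, and the lock handling, tasklet kill and DMA quiescing on the reset path are only hinted at in comments.

    #include <stdbool.h>

    enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_SQE, QPS_ERR };

    struct mqp {
        enum qps     state;
        bool         is_rc;        /* RC transport, i.e. qp_type == IB_QPT_RC */
        bool         s_draining;
        unsigned int s_last, s_cur;
    };

    static void model_reset(struct mqp *qp) { qp->state = QPS_RESET; }
    static bool model_error(struct mqp *qp) { qp->state = QPS_ERR; return true; }

    static int apply_new_state(struct mqp *qp, enum qps new_state, bool *lastwqe)
    {
        switch (new_state) {
        case QPS_RESET:
            /* the driver also pulls the QP off the wait lists, kills the
             * send tasklet and waits for s_dma_busy before resetting */
            model_reset(qp);
            break;
        case QPS_SQD:
            /* "send queue drain" ends once the send engine catches up */
            qp->s_draining = qp->s_last != qp->s_cur;
            qp->state = new_state;
            break;
        case QPS_SQE:
            if (qp->is_rc)
                return -1;          /* SQE is not a valid state for RC */
            qp->state = new_state;
            break;
        case QPS_ERR:
            *lastwqe = model_error(qp);   /* flush both work queues */
            break;
        default:
            qp->state = new_state;
            break;
        }
        return 0;
    }
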
624 struct ipath_qp *qp = to_iqp(ibqp);
626 attr->qp_state = qp->state;
628 attr->path_mtu = qp->path_mtu;
630 attr->qkey = qp->qkey;
631 attr->rq_psn = qp->r_psn;
632 attr->sq_psn = qp->s_next_psn;
633 attr->dest_qp_num = qp->remote_qpn;
634 attr->qp_access_flags = qp->qp_access_flags;
635 attr->cap.max_send_wr = qp->s_size - 1;
636 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
637 attr->cap.max_send_sge = qp->s_max_sge;
638 attr->cap.max_recv_sge = qp->r_rq.max_sge;
640 attr->ah_attr = qp->remote_ah_attr;
642 attr->pkey_index = qp->s_pkey_index;
645 attr->sq_draining = qp->s_draining;
646 attr->max_rd_atomic = qp->s_max_rd_atomic;
647 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
648 attr->min_rnr_timer = qp->r_min_rnr_timer;
650 attr->timeout = qp->timeout;
651 attr->retry_cnt = qp->s_retry_cnt;
652 attr->rnr_retry = qp->s_rnr_retry_cnt;
656 init_attr->event_handler = qp->ibqp.event_handler;
657 init_attr->qp_context = qp->ibqp.qp_context;
658 init_attr->send_cq = qp->ibqp.send_cq;
659 init_attr->recv_cq = qp->ibqp.recv_cq;
660 init_attr->srq = qp->ibqp.srq;
662 if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
666 init_attr->qp_type = qp->ibqp.qp_type;
673 * @qp: the queue pair to compute the AETH for
677 __be32 ipath_compute_aeth(struct ipath_qp *qp)
679 u32 aeth = qp->r_msn & IPATH_MSN_MASK;
681 if (qp->ibqp.srq) {
690 struct ipath_rwq *wq = qp->r_rq.wq;
696 if (head >= qp->r_rq.size)
699 if (tail >= qp->r_rq.size)
708 credits += qp->r_rq.size;
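
ipath_compute_aeth builds the 32-bit AETH carried in RC acknowledgements: the low 24 bits are the message sequence number (line 679) and, when the QP owns its receive queue rather than using an SRQ (line 681), the bits above it advertise how much receive room is left. The wraparound arithmetic behind lines 696-708, pulled out into a stand-alone helper with invented names:

    /* Count the receive WQEs that are posted but not yet consumed on a
     * circular queue of `size` entries.  head is where the next WQE will be
     * posted, tail is the next entry to be consumed; both wrap at size and
     * come from shared memory, so they are sanity-checked first. */
    static unsigned int recv_credits(unsigned int head, unsigned int tail,
                                     unsigned int size)
    {
        unsigned int credits;

        if (head >= size)
            head = 0;
        if (tail >= size)
            tail = 0;

        credits = head - tail;      /* entries still available          */
        if ((int)credits < 0)
            credits += size;        /* tail has wrapped ahead of head   */

        return credits;
    }

The raw count is then converted to the AETH credit encoding and packed into the bits above the MSN before the header goes out.
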
745 struct ipath_qp *qp;
794 sz = sizeof(*qp);
800 sg_list_sz = sizeof(*qp->r_sg_list) *
803 sg_list_sz = sizeof(*qp->r_sg_list) *
805 qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
806 if (!qp) {
813 qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
814 if (!qp->r_ud_sg_list) {
819 qp->r_ud_sg_list = NULL;
822 qp->r_rq.size = 0;
823 qp->r_rq.max_sge = 0;
824 qp->r_rq.wq = NULL;
828 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
829 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
830 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
832 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
833 qp->r_rq.size * sz);
834 if (!qp->r_rq.wq) {
841 * ib_create_qp() will initialize qp->ibqp
842 * except for qp->ibqp.qp_num.
844 spin_lock_init(&qp->s_lock);
845 spin_lock_init(&qp->r_rq.lock);
846 atomic_set(&qp->refcount, 0);
847 init_waitqueue_head(&qp->wait);
848 init_waitqueue_head(&qp->wait_dma);
849 tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
850 INIT_LIST_HEAD(&qp->piowait);
851 INIT_LIST_HEAD(&qp->timerwait);
852 qp->state = IB_QPS_RESET;
853 qp->s_wq = swq;
854 qp->s_size = init_attr->cap.max_send_wr + 1;
855 qp->s_max_sge = init_attr->cap.max_send_sge;
857 qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
859 qp->s_flags = 0;
861 err = ipath_alloc_qpn(&dev->qp_table, qp,
865 vfree(qp->r_rq.wq);
868 qp->ip = NULL;
869 qp->s_tx = NULL;
870 ipath_reset_qp(qp, init_attr->qp_type);
886 if (!qp->r_rq.wq) {
897 qp->r_rq.size * sz;
899 qp->ip =
902 qp->r_rq.wq);
903 if (!qp->ip) {
908 err = ib_copy_to_udata(udata, &(qp->ip->offset),
909 sizeof(qp->ip->offset));
927 if (qp->ip) {
929 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
933 ret = &qp->ibqp;
937 if (qp->ip)
938 kref_put(&qp->ip->ref, ipath_release_mmap_info);
940 vfree(qp->r_rq.wq);
941 ipath_free_qp(&dev->qp_table, qp);
942 free_qpn(&dev->qp_table, qp->ibqp.qp_num);
944 kfree(qp->r_ud_sg_list);
946 kfree(qp);
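
ipath_create_qp allocates in stages: the send work queue, the QP itself with a trailing SG-list array (lines 794-806), the UD SG list, the vmalloc_user()'d receive ring that can later be mmapped to user space (lines 828-833), then the locks, wait queues, send tasklet and finally a QP number (line 861), with the mmap bookkeeping and udata copy after that (lines 886-913). Everything from line 937 on is the matching unwind, freeing only what was already set up, in reverse order. That idiom, reduced to a compilable user-space sketch with hypothetical names:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the driver's allocation steps. */
    struct thing { void *ring; int id; };
    static int grab_id(void) { return 1; }           /* cf. ipath_alloc_qpn() */

    static struct thing *thing_create(size_t ring_bytes)
    {
        struct thing *t;
        int err = -ENOMEM;

        t = malloc(sizeof(*t));                      /* cf. kmalloc() of the QP */
        if (!t)
            goto bail;

        t->ring = malloc(ring_bytes);                /* cf. vmalloc_user() ring */
        if (!t->ring)
            goto bail_thing;

        t->id = grab_id();                           /* cf. QPN allocation      */
        if (t->id < 0) {
            err = t->id;
            goto bail_ring;
        }
        return t;

        /* Failure exits undo only what already succeeded, in reverse order,
         * the same shape as the bail_* labels behind the listing above. */
    bail_ring:
        free(t->ring);
    bail_thing:
        free(t);
    bail:
        errno = -err;
        return NULL;
    }
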
964 struct ipath_qp *qp = to_iqp(ibqp);
968 spin_lock_irq(&qp->s_lock);
969 if (qp->state != IB_QPS_RESET) {
970 qp->state = IB_QPS_RESET;
972 if (!list_empty(&qp->timerwait))
973 list_del_init(&qp->timerwait);
974 if (!list_empty(&qp->piowait))
975 list_del_init(&qp->piowait);
977 qp->s_flags &= ~IPATH_S_ANY_WAIT;
978 spin_unlock_irq(&qp->s_lock);
980 tasklet_kill(&qp->s_task);
981 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
983 spin_unlock_irq(&qp->s_lock);
985 ipath_free_qp(&dev->qp_table, qp);
987 if (qp->s_tx) {
988 atomic_dec(&qp->refcount);
989 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
990 kfree(qp->s_tx->txreq.map_addr);
992 list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
994 qp->s_tx = NULL;
997 wait_event(qp->wait, !atomic_read(&qp->refcount));
1000 free_qpn(&dev->qp_table, qp->ibqp.qp_num);
1005 if (qp->ip)
1006 kref_put(&qp->ip->ref, ipath_release_mmap_info);
1008 vfree(qp->r_rq.wq);
1009 kfree(qp->r_ud_sg_list);
1010 vfree(qp->s_wq);
1011 kfree(qp);
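
ipath_destroy_qp tears down in an order that keeps concurrent lookups safe: force the QP to RESET and pull it off the wait lists, kill the send tasklet and wait for in-flight DMA (lines 980-981), remove the QP from the QPN table (line 985), return any send-DMA txreq it still owns (lines 987-994), and only after wait_event(qp->wait, !atomic_read(&qp->refcount)) at line 997 free the QPN, the mmap info, the receive ring and the QP itself. A small pthread-based analogue of that final wait-for-references step, with invented names; the kernel uses a wait queue where this sketch uses a condition variable.

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
        int              refcount;
        pthread_mutex_t  lock;
        pthread_cond_t   released;
        /* ... payload ... */
    };

    /* Drop one reference; wake a destroyer that may be waiting for zero. */
    static void obj_put(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        if (--o->refcount == 0)
            pthread_cond_broadcast(&o->released);
        pthread_mutex_unlock(&o->lock);
    }

    /* Caller has already unlinked o from every lookup structure, so no new
     * references can be taken; sleep until the existing ones are put back. */
    static void obj_destroy(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        while (o->refcount != 0)
            pthread_cond_wait(&o->released, &o->lock);
        pthread_mutex_unlock(&o->lock);

        pthread_cond_destroy(&o->released);
        pthread_mutex_destroy(&o->lock);
        free(o);
    }
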
1050  * @qp: the qp whose send work queue to flush
1055 void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
1065 qp->s_lsn = (u32) -1;
1066 else if (qp->s_lsn != (u32) -1) {
1069 if (ipath_cmp24(credit, qp->s_lsn) > 0)
1070 qp->s_lsn = credit;
1074 if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
1075 qp->s_cur != qp->s_head &&
1076 (qp->s_lsn == (u32) -1 ||
1077 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
1078 qp->s_lsn + 1) <= 0))
1079 ipath_schedule_send(qp);
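
The credit handling above relies on two conventions visible in the fragments: s_lsn == (u32) -1 means the remote end granted unlimited credit (line 1065), and ipath_cmp24() compares 24-bit circular sequence numbers so a newer credit limit or PSN compares greater even across wraparound. A stand-alone, well-defined version of that comparison, written for illustration rather than copied from the driver:

    /* Compare two 24-bit circular sequence numbers.  The result is negative,
     * zero or positive like memcmp: a value less than half the 24-bit space
     * ahead of b compares greater, anything else compares less. */
    static int cmp24(unsigned int a, unsigned int b)
    {
        unsigned int d = (a - b) & 0xffffff;

        return d >= 0x800000 ? (int)d - 0x1000000 : (int)d;
    }

With that, line 1069's test ipath_cmp24(credit, qp->s_lsn) > 0 reads as "the newly advertised limit is further along than the one already recorded", and lines 1074-1079 kick the send engine only when a send is waiting on credit and the next WQE's SSN falls within the new limit.
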