Lines matching refs:qp

42  * @qp: a pointer to the QP
46 int qib_make_uc_req(struct qib_qp *qp)
54 u32 pmtu = qp->pmtu;
57 spin_lock_irqsave(&qp->s_lock, flags);
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
63 if (qp->s_last == qp->s_head)
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp->s_flags |= QIB_S_WAIT_DMA;
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
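
Source lines 59-71 above are the flush-on-error path of qib_make_uc_req(): if the QP state no longer permits sending but does permit flushing, each pending send WQE is completed with IB_WC_WR_FLUSH_ERR once the send DMA engine is idle. A minimal userspace sketch of that pattern follows; the names (qp_sim, swqe_sim, flush_sends) are hypothetical stand-ins since the real qib structures are kernel-internal, and the driver completes one WQE per invocation under qp->s_lock where this sketch loops for brevity:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel structures. */
    enum { PROCESS_SEND_OK = 1, FLUSH_SEND = 2 };
    struct swqe_sim { int wr_id; };
    struct qp_sim {
        unsigned state_ops;      /* what the current QP state permits */
        unsigned s_last, s_head, s_size;
        int s_dma_busy;          /* stands in for atomic_read(&qp->s_dma_busy) */
        struct swqe_sim swq[8];
    };

    /* Drain the send queue when sending is no longer allowed: complete
     * each pending WQE as flushed once the DMA engine has gone idle. */
    static void flush_sends(struct qp_sim *qp)
    {
        if (qp->state_ops & PROCESS_SEND_OK)
            return;                      /* normal send path runs instead */
        if (!(qp->state_ops & FLUSH_SEND))
            return;                      /* not even flushing is allowed */
        while (qp->s_last != qp->s_head && !qp->s_dma_busy) {
            printf("flush wr_id %d (IB_WC_WR_FLUSH_ERR)\n",
                   qp->swq[qp->s_last].wr_id);
            if (++qp->s_last >= qp->s_size)
                qp->s_last = 0;
        }
    }

    int main(void)
    {
        struct qp_sim qp = { FLUSH_SEND, 0, 2, 8, 0, { { 7 }, { 8 } } };
        flush_sends(&qp);
        return 0;
    }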
75 ohdr = &qp->s_hdr.u.oth;
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
77 ohdr = &qp->s_hdr.u.l.oth;
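
Source lines 75-77 pick where the transport header starts: qp->s_hdr holds a prebuilt header template, and when the remote address handle carries a GRH the BTH ("other headers") sits after the 40-byte global route header instead of directly after the LRH, hence the two union arms. A sketch of that layout, with hypothetical mirror types (ib_header, other_headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical mirror of the qib header union: the transport header
     * (BTH onward) follows the LRH directly, or follows a 40-byte GRH
     * when the destination requires global routing. */
    struct other_headers { uint32_t bth[3]; };
    struct ib_grh { uint8_t bytes[40]; };

    struct ib_header {
        uint16_t lrh[4];
        union {
            struct other_headers oth;        /* no GRH */
            struct {
                struct ib_grh grh;
                struct other_headers oth;    /* after the GRH */
            } l;
        } u;
    };

    #define AH_FLAG_GRH 1

    int main(void)
    {
        struct ib_header hdr;
        unsigned ah_flags = AH_FLAG_GRH;
        struct other_headers *ohdr;

        ohdr = (ah_flags & AH_FLAG_GRH) ? &hdr.u.l.oth : &hdr.u.oth;
        printf("oth at offset %zu\n", (size_t)((char *)ohdr - (char *)&hdr));
        return 0;
    }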
84 wqe = get_swqe_ptr(qp, qp->s_cur);
85 qp->s_wqe = NULL;
86 switch (qp->s_state) {
88 if (!(ib_qib_state_ops[qp->state] &
92 if (qp->s_cur == qp->s_head)
97 wqe->psn = qp->s_next_psn;
98 qp->s_psn = qp->s_next_psn;
99 qp->s_sge.sge = wqe->sg_list[0];
100 qp->s_sge.sg_list = wqe->sg_list + 1;
101 qp->s_sge.num_sge = wqe->wr.num_sge;
102 qp->s_sge.total_len = wqe->length;
104 qp->s_len = len;
109 qp->s_state = OP(SEND_FIRST);
114 qp->s_state = OP(SEND_ONLY);
116 qp->s_state =
124 qp->s_wqe = wqe;
125 if (++qp->s_cur >= qp->s_size)
126 qp->s_cur = 0;
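
Source lines 97-126 build the first packet of a send: the PSN is stamped from s_next_psn, the gather state is loaded from the WQE, and the opcode depends on whether the whole message fits in one path MTU (SEND_ONLY, or SEND_ONLY_WITH_IMMEDIATE on the continuation of source line 116) or must be segmented starting with SEND_FIRST. A sketch of the opcode/length decision; first_send_op is a hypothetical name:

    #include <stdio.h>

    enum pkt_op { SEND_FIRST, SEND_ONLY, SEND_ONLY_WITH_IMMEDIATE };

    /* Pick the opcode for the first packet of a send and clamp its
     * length to the path MTU, mirroring source lines 104-126. */
    static enum pkt_op first_send_op(unsigned *len, unsigned pmtu, int has_imm)
    {
        if (*len > pmtu) {
            *len = pmtu;               /* more packets will follow */
            return SEND_FIRST;
        }
        return has_imm ? SEND_ONLY_WITH_IMMEDIATE : SEND_ONLY;
    }

    int main(void)
    {
        unsigned len = 5000, pmtu = 2048;
        enum pkt_op op = first_send_op(&len, pmtu, 0);
        printf("op=%d first-packet len=%u\n", op, len); /* SEND_FIRST, 2048 */
        return 0;
    }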
138 qp->s_state = OP(RDMA_WRITE_FIRST);
143 qp->s_state = OP(RDMA_WRITE_ONLY);
145 qp->s_state =
153 qp->s_wqe = wqe;
154 if (++qp->s_cur >= qp->s_size)
155 qp->s_cur = 0;
164 qp->s_state = OP(SEND_MIDDLE);
167 len = qp->s_len;
173 qp->s_state = OP(SEND_LAST);
175 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
182 qp->s_wqe = wqe;
183 if (++qp->s_cur >= qp->s_size)
184 qp->s_cur = 0;
188 qp->s_state = OP(RDMA_WRITE_MIDDLE);
191 len = qp->s_len;
197 qp->s_state = OP(RDMA_WRITE_LAST);
199 qp->s_state =
207 qp->s_wqe = wqe;
208 if (++qp->s_cur >= qp->s_size)
209 qp->s_cur = 0;
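
The `if (++qp->s_cur >= qp->s_size) qp->s_cur = 0;` pair after every arm (source lines 125-126, 154-155, 183-184, 208-209) advances the send queue's ring index; a compare-and-reset wrap is cheaper than a modulo and works for any queue size, not just powers of two. The same idiom, runnable standalone:

    #include <stdio.h>

    /* Advance a fixed-size ring index with a compare instead of a modulo. */
    static unsigned ring_advance(unsigned cur, unsigned size)
    {
        if (++cur >= size)
            cur = 0;
        return cur;
    }

    int main(void)
    {
        unsigned s_cur = 0, s_size = 3;
        for (int i = 0; i < 5; i++) {
            printf("%u ", s_cur);          /* 0 1 2 0 1 */
            s_cur = ring_advance(s_cur, s_size);
        }
        printf("\n");
        return 0;
    }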
212 qp->s_len -= len;
213 qp->s_hdrwords = hwords;
214 qp->s_cur_sge = &qp->s_sge;
215 qp->s_cur_size = len;
216 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
217 qp->s_next_psn++ & QIB_PSN_MASK);
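
Source lines 212-217 finish the packet: the remaining length and header word count are recorded, and the PSN placed in the BTH is s_next_psn masked to 24 bits (QIB_PSN_MASK is 0xFFFFFF), since InfiniBand PSNs wrap modulo 2^24. A small demonstration of the mask-at-use pattern:

    #include <stdio.h>
    #include <stdint.h>

    #define PSN_MASK 0xFFFFFF  /* PSNs are 24 bits wide (QIB_PSN_MASK) */

    int main(void)
    {
        uint32_t s_next_psn = 0xFFFFFE;
        for (int i = 0; i < 4; i++) {
            /* The counter keeps growing; the mask is applied where the
             * value goes on the wire. */
            uint32_t psn = s_next_psn++ & PSN_MASK;
            printf("0x%06x\n", psn);  /* 0xfffffe 0xffffff 0x000000 0x000001 */
        }
        return 0;
    }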
223 qp->s_flags &= ~QIB_S_BUSY;
225 spin_unlock_irqrestore(&qp->s_lock, flags);
236 * @qp: the QP for this packet.
243 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
251 u32 pmtu = qp->pmtu;
265 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
272 if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
277 qp->r_psn = psn;
279 if (qp->r_state == OP(SEND_FIRST) ||
280 qp->r_state == OP(SEND_MIDDLE)) {
281 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
282 qp->r_sge.num_sge = 0;
284 while (qp->r_sge.num_sge) {
285 atomic_dec(&qp->r_sge.sge.mr->refcount);
286 if (--qp->r_sge.num_sge)
287 qp->r_sge.sge = *qp->r_sge.sg_list++;
289 qp->r_state = OP(SEND_LAST);
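
Source lines 272-289 are the receiver's resynchronization: UC keeps no retry state, so on a PSN mismatch it simply adopts the new PSN, and if it was mid-message it flags the SGE state for rewind, drops the memory-region references it held, and forces r_state to SEND_LAST so only a fresh first/only packet is accepted next. qib_cmp24() compares PSNs modulo 2^24 by shifting the difference so PSN bit 23 becomes the machine sign bit; an equivalent sketch using well-defined unsigned arithmetic (the kernel shifts a signed int instead):

    #include <stdio.h>
    #include <stdint.h>

    /* 24-bit circular PSN comparison in the spirit of qib_cmp24():
     * negative, zero, positive mirrors the order of a and b mod 2^24. */
    static int32_t cmp24(uint32_t a, uint32_t b)
    {
        return (int32_t)(((a - b) & 0xFFFFFF) << 8);
    }

    int main(void)
    {
        /* 0x000001 is one *after* 0xFFFFFF in 24-bit sequence space. */
        printf("%d\n", cmp24(0x000001, 0xFFFFFF) > 0);  /* 1 */
        printf("%d\n", cmp24(0x345678, 0x345678) == 0); /* 1 */
        return 0;
    }

The receive path only uses the result for equality (`!= 0` at source line 272), but the sign convention is what lets the RC code reuse the same helper for ordering.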
307 switch (qp->r_state) {
335 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
336 qp->r_flags |= QIB_R_COMM_EST;
337 if (qp->ibqp.event_handler) {
340 ev.device = qp->ibqp.device;
341 ev.element.qp = &qp->ibqp;
343 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
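
Source lines 335-343 raise IB_EVENT_COMM_EST exactly once: the first packet that arrives while the QP is still in RTR proves the remote side is talking, and the QIB_R_COMM_EST flag keeps the consumer's event handler from firing again. A sketch of the callback pattern; ib_event_sim, qp_sim, and note_comm_est are hypothetical stand-ins:

    #include <stdio.h>

    enum ib_event_kind { EVENT_COMM_EST };

    struct ib_event_sim {
        enum ib_event_kind event;
        void *element_qp;
    };

    struct qp_sim {
        unsigned r_flags;
        void (*event_handler)(struct ib_event_sim *ev, void *ctx);
        void *qp_context;
    };

    #define R_COMM_EST 0x01

    static void on_event(struct ib_event_sim *ev, void *ctx)
    {
        printf("consumer %s saw COMM_EST\n", (const char *)ctx);
    }

    /* Raise COMM_EST once, on the first packet seen in the RTR state. */
    static void note_comm_est(struct qp_sim *qp)
    {
        if (qp->r_flags & R_COMM_EST)
            return;
        qp->r_flags |= R_COMM_EST;
        if (qp->event_handler) {
            struct ib_event_sim ev = { EVENT_COMM_EST, qp };
            qp->event_handler(&ev, qp->qp_context);
        }
    }

    int main(void)
    {
        struct qp_sim qp = { 0, on_event, "ulp0" };
        note_comm_est(&qp);
        note_comm_est(&qp);   /* second call is a no-op */
        return 0;
    }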
353 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
354 qp->r_sge = qp->s_rdma_read_sge;
356 ret = qib_get_rwqe(qp, 0);
362 * qp->s_rdma_read_sge will be the owner
365 qp->s_rdma_read_sge = qp->r_sge;
367 qp->r_rcv_len = 0;
377 qp->r_rcv_len += pmtu;
378 if (unlikely(qp->r_rcv_len > qp->r_len))
380 qib_copy_sge(&qp->r_sge, data, pmtu, 0);
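
Source lines 377-380 handle a SEND middle packet: the running receive count grows by one MTU and must never exceed r_len, the size of the posted receive buffer (the RDMA-write path makes the analogous check against the RETH length at source lines 473-476). A sketch of the accounting, assuming a hypothetical accept_middle():

    #include <stdio.h>

    /* Each middle packet carries exactly one MTU of payload; the running
     * total may never pass the expected length, or the packet is dropped. */
    static int accept_middle(unsigned *r_rcv_len, unsigned r_len, unsigned pmtu)
    {
        *r_rcv_len += pmtu;
        return *r_rcv_len <= r_len;
    }

    int main(void)
    {
        unsigned r_rcv_len = 0, r_len = 4096, pmtu = 2048;
        printf("%d\n", accept_middle(&r_rcv_len, r_len, pmtu)); /* 1 */
        printf("%d\n", accept_middle(&r_rcv_len, r_len, pmtu)); /* 1 */
        printf("%d\n", accept_middle(&r_rcv_len, r_len, pmtu)); /* 0: too long */
        return 0;
    }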
402 wc.byte_len = tlen + qp->r_rcv_len;
403 if (unlikely(wc.byte_len > qp->r_len))
407 qib_copy_sge(&qp->r_sge, data, tlen, 0);
408 while (qp->s_rdma_read_sge.num_sge) {
409 atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
410 if (--qp->s_rdma_read_sge.num_sge)
411 qp->s_rdma_read_sge.sge =
412 *qp->s_rdma_read_sge.sg_list++;
414 wc.wr_id = qp->r_wr_id;
416 wc.qp = &qp->ibqp;
417 wc.src_qp = qp->remote_qpn;
418 wc.slid = qp->remote_ah_attr.dlid;
419 wc.sl = qp->remote_ah_attr.sl;
426 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
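
Source lines 402-426 complete the receive: byte_len is the accumulated middles plus the final payload, checked against the buffer, and the work completion is filled from QP state (wr_id from the consumed receive WQE, source QP/LID/SL from the cached remote address attributes) before being queued on the receive CQ. A sketch of just the field mapping, with a hypothetical wc_sim in place of ib_wc and made-up values:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the handful of ib_wc fields this path fills. */
    struct wc_sim {
        uint64_t wr_id;
        uint32_t byte_len;
        uint32_t src_qp;
        uint16_t slid;
        uint8_t  sl;
    };

    int main(void)
    {
        /* These would come from qp->r_wr_id, tlen + qp->r_rcv_len,
         * qp->remote_qpn and qp->remote_ah_attr on a real QP. */
        struct wc_sim wc = {
            .wr_id    = 0xabcd,
            .byte_len = 1024 + 3072,
            .src_qp   = 17,
            .slid     = 5,
            .sl       = 0,
        };
        printf("cqe: wr_id=%llx len=%u from qp %u lid %u sl %u\n",
               (unsigned long long)wc.wr_id, wc.byte_len,
               wc.src_qp, wc.slid, wc.sl);
        return 0;
    }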
435 if (unlikely(!(qp->qp_access_flags &
441 qp->r_len = be32_to_cpu(reth->length);
442 qp->r_rcv_len = 0;
443 qp->r_sge.sg_list = NULL;
444 if (qp->r_len != 0) {
450 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
454 qp->r_sge.num_sge = 1;
456 qp->r_sge.num_sge = 0;
457 qp->r_sge.sge.mr = NULL;
458 qp->r_sge.sge.vaddr = NULL;
459 qp->r_sge.sge.length = 0;
460 qp->r_sge.sge.sge_length = 0;
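
Source lines 435-460 parse the RETH of an incoming RDMA write: the operation needs remote-write access, a non-zero length must validate (vaddr, rkey, length) through qib_rkey_ok(), and a zero-length write deliberately leaves an empty SGE (num_sge = 0, NULL MR) so the later copy and release loops do nothing. A sketch under those assumptions, with a permissive stub standing in for the real rkey table lookup (sge_sim, setup_write are hypothetical):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct sge_sim { void *vaddr; uint32_t length; int valid; };
    struct sge_state { struct sge_sim sge; unsigned num_sge; };

    static int rkey_ok(struct sge_sim *sge, uint32_t len,
                       uint64_t vaddr, uint32_t rkey)
    {
        (void)rkey;               /* a real driver walks its rkey table here */
        sge->vaddr = (void *)(uintptr_t)vaddr;
        sge->length = len;
        sge->valid = 1;
        return 1;
    }

    static int setup_write(struct sge_state *ss, uint64_t vaddr,
                           uint32_t rkey, uint32_t len)
    {
        if (len != 0) {
            if (!rkey_ok(&ss->sge, len, vaddr, rkey))
                return 0;                    /* protection error */
            ss->num_sge = 1;
        } else {
            memset(&ss->sge, 0, sizeof(ss->sge));
            ss->num_sge = 0;                 /* zero-length write is legal */
        }
        return 1;
    }

    int main(void)
    {
        struct sge_state ss;
        setup_write(&ss, 0x1000, 0x42, 0);
        printf("num_sge=%u\n", ss.num_sge);  /* 0 */
        return 0;
    }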
473 qp->r_rcv_len += pmtu;
474 if (unlikely(qp->r_rcv_len > qp->r_len))
476 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
493 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
495 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
496 while (qp->s_rdma_read_sge.num_sge) {
497 atomic_dec(&qp->s_rdma_read_sge.sge.mr->
499 if (--qp->s_rdma_read_sge.num_sge)
500 qp->s_rdma_read_sge.sge =
501 *qp->s_rdma_read_sge.sg_list++;
504 ret = qib_get_rwqe(qp, 1);
510 wc.byte_len = qp->r_len;
524 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
526 qib_copy_sge(&qp->r_sge, data, tlen, 1);
527 while (qp->r_sge.num_sge) {
528 atomic_dec(&qp->r_sge.sge.mr->refcount);
529 if (--qp->r_sge.num_sge)
530 qp->r_sge.sge = *qp->r_sge.sg_list++;
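
The while loops at source lines 284-287, 408-412, 496-501, and 527-530 all share one shape: each SGE in the chain pins a memory region, so finishing (or abandoning) a message walks the chain, drops one MR reference per entry, and pulls the next entry from sg_list. (The continuation of source line 497, which does not contain "qp", supplies the matching refcount operand.) A standalone sketch of the walk; mr_sim, sge_state, and release_sges are hypothetical, and a plain decrement stands in for atomic_dec():

    #include <stdio.h>

    struct mr_sim { int refcount; };
    struct sge_sim { struct mr_sim *mr; };

    /* The first SGE lives inline; the rest come from sg_list. */
    struct sge_state {
        struct sge_sim sge;
        struct sge_sim *sg_list;
        unsigned num_sge;
    };

    static void release_sges(struct sge_state *ss)
    {
        while (ss->num_sge) {
            ss->sge.mr->refcount--;        /* atomic_dec() in the driver */
            if (--ss->num_sge)
                ss->sge = *ss->sg_list++;
        }
    }

    int main(void)
    {
        struct mr_sim a = { 1 }, b = { 1 };
        struct sge_sim rest[] = { { &b } };
        struct sge_state ss = { { &a }, rest, 2 };

        release_sges(&ss);
        printf("%d %d\n", a.refcount, b.refcount);  /* 0 0 */
        return 0;
    }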
538 qp->r_psn++;
539 qp->r_state = opcode;
543 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
544 qp->r_sge.num_sge = 0;
550 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);