ipath_ruc.c revision e28c00ad67164dba688c1d19c208c5fb554465f2
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
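/*
 * The index is the 5-bit RNR NAK timer code carried in the AETH.  Per
 * the InfiniBand spec, code 0 means 655.36 ms and code 0x1F means
 * 491.52 ms; the entries below are those delays rounded up to whole
 * milliseconds.
 */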
const u32 ib_ipath_rnr_table[32] = {
	656,			/* 0 */
	1,			/* 1 */
	1,			/* 2 */
	1,			/* 3 */
	1,			/* 4 */
	1,			/* 5 */
	1,			/* 6 */
	1,			/* 7 */
	1,			/* 8 */
	1,			/* 9 */
	1,			/* A */
	1,			/* B */
	1,			/* C */
	1,			/* D */
	2,			/* E */
	2,			/* F */
	3,			/* 10 */
	4,			/* 11 */
	6,			/* 12 */
	8,			/* 13 */
	11,			/* 14 */
	16,			/* 15 */
	21,			/* 16 */
	31,			/* 17 */
	41,			/* 18 */
	62,			/* 19 */
	82,			/* 1A */
	123,			/* 1B */
	164,			/* 1C */
	246,			/* 1D */
	328,			/* 1E */
	492			/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts,
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

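		/*
		 * Walk the list, making qp's timeout relative to the
		 * QPs already queued ahead of it, and insert it in
		 * order of increasing remaining timeout.
		 */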
		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Called at interrupt level with the QP r_rq.lock held.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	int ret;

	if (!qp->ibqp.srq) {
		rq = &qp->r_rq;
		if (unlikely(rq->tail == rq->head)) {
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, rq->tail);
		qp->r_wr_id = wqe->wr_id;
		if (!wr_id_only) {
			qp->r_sge.sge = wqe->sg_list[0];
			qp->r_sge.sg_list = wqe->sg_list + 1;
			qp->r_sge.num_sge = wqe->num_sge;
			qp->r_len = wqe->length;
		}
		if (++rq->tail >= rq->size)
			rq->tail = 0;
		ret = 1;
		goto bail;
	}

	srq = to_isrq(qp->ibqp.srq);
	rq = &srq->rq;
	spin_lock(&rq->lock);
	if (unlikely(rq->tail == rq->head)) {
		spin_unlock(&rq->lock);
		ret = 0;
		goto bail;
	}
	wqe = get_rwqe_ptr(rq, rq->tail);
	qp->r_wr_id = wqe->wr_id;
	if (!wr_id_only) {
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->num_sge;
		qp->r_len = wqe->length;
	}
	if (++rq->tail >= rq->size)
		rq->tail = 0;
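	/*
	 * If the SRQ has an armed limit, count the receive work requests
	 * still queued and post an IB_EVENT_SRQ_LIMIT_REACHED event when
	 * the count drops below the limit.  The limit is disarmed before
	 * the event handler is called.
	 */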
	if (srq->ibsrq.event_handler) {
		struct ib_event ev;
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			srq->limit = 0;
			spin_unlock(&rq->lock);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock(&rq->lock);
	} else
		spin_unlock(&rq->lock);
	ret = 1;

bail:
	return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 * @wc: the work completion entry
 *
 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	u64 sdata;

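	/*
	 * ipath_lookup_qpn() takes a reference on the destination QP;
	 * it is dropped on the "done" path below.
	 */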
	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc->wc_flags = 0;
	wc->imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc->status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
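			/* A retry count of 7 means retry indefinitely. */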
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wqe->wr.imm_data;
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc->status = IB_WC_REM_ACCESS_ERR;
		err:
			wc->wr_id = wqe->wr.wr_id;
			wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc->vendor_err = 0;
			wc->byte_len = 0;
			wc->qp_num = sqp->ibqp.qp_num;
			wc->src_qp = sqp->remote_qpn;
			wc->pkey_index = 0;
			wc->slid = sqp->remote_ah_attr.dlid;
			wc->sl = sqp->remote_ah_attr.sl;
			wc->dlid_path_bits = 0;
			wc->port_num = 0;
			ipath_sqerror_qp(sqp, wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		sdata = wqe->wr.wr.atomic.compare_add;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == sdata)
			*(u64 *) qp->r_sge.sge.vaddr =
				wqe->wr.wr.atomic.swap;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		/* Return the original value to the requester. */
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}

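	/*
	 * Copy the payload from the sender's SGE list into the receiver's,
	 * stepping to the next SGE when one is consumed and to the next
	 * memory region segment when a segment is exhausted.
	 */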
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc->opcode = IB_WC_RECV;
	wc->wr_id = qp->r_wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->vendor_err = 0;
	wc->byte_len = wqe->length;
	wc->qp_num = qp->ibqp.qp_num;
	wc->src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc->pkey_index = 0;
	wc->slid = qp->remote_ah_attr.dlid;
	wc->sl = qp->remote_ah_attr.sl;
	wc->dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

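	/*
	 * Post a send completion unless the QP uses selective signaling
	 * and this work request was not flagged IB_SEND_SIGNALED.
	 */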
	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_SUCCESS;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = wqe->length;
		wc->qp_num = sqp->ibqp.qp_num;
		wc->src_qp = 0;
		wc->pkey_index = 0;
		wc->slid = 0;
		wc->sl = 0;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
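	/*
	 * list_del() poisons the next pointer, so this test avoids
	 * adding the QP to the PIO wait list a second time.
	 */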
	if (qp->piowait.next == LIST_POISON1)
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as ipath_layer_want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	ipath_layer_want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_rc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC, and
	 * reject undefined operations.
	 * Make sure the buffer is large enough to hold the result
	 * for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
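	/* The send work queue is full if s_head would catch up to s_last. */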
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
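		/*
		 * An L_Key of zero is reserved for kernel-internal
		 * (physically addressed) SGEs, so it is rejected from
		 * userspace QPs.
		 */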
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	/*
	 * Wake up the send tasklet if the QP is not waiting
	 * for an RNR timeout.
	 */
	next = qp->s_rnr_timeout;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (next == 0) {
		if (qp->ibqp.qp_type == IB_QPT_UC)
			ipath_do_uc_send((unsigned long) qp);
		else
			ipath_do_rc_send((unsigned long) qp);
	}

	ret = 0;

bail:
	return ret;
}