ipath_qp.c revision 8d0208cb59a43bf867e16b977c34c4d6cd618f59
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						      BITS_PER_PAGE, off)

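/*
 * A worked example of the macros above, assuming 4 KB pages (so
 * BITS_PER_PAGE == 32768): bit 5 of qpt->map[1] represents
 * QPN 1 * 32768 + 5 == 32773, and find_next_offset() returns the
 * offset of the next zero (free) bit in that page's bitmap.
 */
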
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
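
/*
 * Note the pattern in the table: from code 4 upward, even codes are
 * powers of two and odd codes are three times a power of two, so each
 * step of the 5-bit credit code scales the advertised credit count by
 * roughly a factor of sqrt(2).
 */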

static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			unsigned long flags;

			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irqsave(&qpt->lock, flags);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irqrestore(&qpt->lock, flags);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = 0;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	u32 qpn;
	int ret;

	if (type == IB_QPT_SMI)
		qpn = 0;
	else if (type == IB_QPT_GSI)
		qpn = 1;
	else {
		/* Allocate the next available QPN */
		qpn = alloc_qpn(qpt);
		if (qpn == 0) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qp->ibqp.qp_num = qpn;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	qpn %= qpt->max;
	qp->next = qpt->table[qpn];
	qpt->table[qpn] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	/* If QPN is not reserved, mark QPN free in the bitmap. */
	if (qp->ibqp.qp_num > 1)
		free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			if (qp->ibqp.qp_num > 1)
				free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg(KERN_INFO "QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long)qpt->map[n].page);
	}
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
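
/*
 * A typical caller of ipath_lookup_qpn() (sketch) drops the reference
 * taken above when it is done with the QP, and wakes any thread in
 * ipath_free_qp() waiting for the count to reach zero:
 *
 *	qp = ipath_lookup_qpn(qpt, qpn);
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */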

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	qp->s_hdrwords = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_wait_credit = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if an RWQE is active
 *
 * Flushes both send and receive work queues.
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;

	ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
		  qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		/* The active RWQE completes on the receive CQ. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	}
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_4096)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		if (attr->max_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = 1;
	attr->max_dest_rd_atomic = 1;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
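		/*
		 * Worked example: with 100 free RWQEs the search ends at
		 * x == 0xD (credit_table[0xD] == 96), i.e. the largest
		 * code that does not overstate the available credits.
		 */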
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_ruc_send,
			     (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) qp->r_rq.wq;
		int err;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}

		if (qp->r_rq.wq) {
			/* Allocate info for ipath_mmap(). */
			ip = kmalloc(sizeof(*ip), GFP_KERNEL);
			if (!ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_rwq;
			}
			qp->ip = ip;
			ip->context = ibpd->uobject->context;
			ip->obj = qp->r_rq.wq;
			kref_init(&ip->ref);
			ip->mmap_cnt = 0;
			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			spin_lock_irq(&dev->pending_lock);
			ip->next = dev->pending_mmaps;
			dev->pending_mmaps = ip;
			spin_unlock_irq(&dev->pending_lock);
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	kfree(qp->ip);
bail_rwq:
	vfree(qp->r_rq.wq);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		/* Reload the WQE for the entry now being flushed. */
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - handle a credit update from an AETH
 * @qp: the QP whose send credits are being updated
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
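		/*
		 * Worked example: an AETH carrying credit code 0x5
		 * advertises credit_table[0x5] == 6 RWQEs beyond the
		 * MSN in the AETH, so MSN 10 yields an LSN of 16
		 * (modulo the 24-bit MSN space).
		 */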
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
