ipath_qp.c revision b846f25aa2a353355aec5202fe4dbdc6674dfc64
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						      BITS_PER_PAGE, off)

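/*
 * Each qpn_map page tracks BITS_PER_PAGE QP numbers: QPN n lives at bit
 * n % BITS_PER_PAGE of page n / BITS_PER_PAGE, and mk_qpn() converts a
 * (map page, bit offset) pair back into a QP number.
 */
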
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
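
/*
 * For codes 4..0x1E the entries follow the closed form
 * (4 + 2 * (code & 1)) << ((code - 4) >> 1): powers of two interleaved
 * with 3 * 2^n, so each step up the table grows the credit count by
 * roughly 1.5x, matching the AETH credit code encoding.
 */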

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}

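/*
 * Allocate the next available QPN.  QPN 0 and 1 are reserved for the
 * special SMI and GSI QPs; all other QP types scan the bitmap pages
 * starting just past the last QPN handed out, wrapping back to 2 when
 * QPN_MAX is reached.
 */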
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long)qpt->map[n].page);
	}
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_busy = 0;
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
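	/*
	 * The *_SEND_LAST opcodes mark the "previous" message as
	 * complete, so the send and receive state machines treat the
	 * next packet as the start of a new message.
	 */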
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_wait_credit = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if an RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 */
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	ipath_dbg("QP%d/%d in error state (%d)\n",
		  qp->ibqp.qp_num, qp->remote_qpn, err);

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

	return ret;
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int lastwqe = 0;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Don't allow invalid path MTU values, or values greater than
	 * 2048 unless we are configured for a 4KB MTU.
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
		(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
		(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
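		/*
		 * The search rounds down, e.g. 100 free RWQEs yields
		 * code 0xD (96 credits), so we never advertise more
		 * credits than are actually available.
		 */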
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for ipathverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
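		/*
		 * Each send WQE is a struct ipath_swqe followed by room
		 * for max_send_sge struct ipath_sge entries, so sz is
		 * the per-entry stride of the s_wq array (see
		 * get_swqe_ptr()).
		 */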
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
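			/*
			 * The receive queue is a struct ipath_rwq header
			 * (the head and tail indices) followed by
			 * r_rq.size entries, each a struct ipath_rwqe
			 * plus max_sge struct ib_sge entries (see
			 * get_rwqe_ptr()).  vmalloc_user() is used so
			 * the buffer can be mmapped into userspace.
			 */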
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}
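
	/*
	 * The bitmap pages themselves are allocated lazily by
	 * get_map_page() the first time a QPN in their range is needed.
	 */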

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: the QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - handle a credit update in an incoming AETH
 * @qp: the QP to update the send credit limit for
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Updates the QP's limit sequence number from the AETH credit field
 * and restarts sending if it was blocked waiting for credits.
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send as many packets as we
	 * like.  Otherwise, we have to honor the credit field.
	 */
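	/*
	 * For example, credit code 6 grants 8 credits: the new limit
	 * becomes (AETH MSN + 8) mod 2^24 if that is newer than the
	 * current s_lsn.
	 */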
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
1090