/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

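/* Convert a bitmap page and bit offset back into the corresponding QPN. */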
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

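/*
 * Find the next candidate bit offset for a QPN within a bitmap page.
 * When a QPN mask is in use (multiple kernel receive contexts), skip
 * offsets whose masked bits would select a receive context >= n;
 * otherwise simply find the next zero bit in the page.
 */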
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};

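/*
 * Allocate and install a zeroed bitmap page for a QPN map entry,
 * dropping our page if another thread raced ahead and installed one.
 */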
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before
		 * increasing the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

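/* Return a QPN to the bitmap so that it can be reallocated. */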
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

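/* Hash a QPN into an index into the device's QP hash table. */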
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				rcu_assign_pointer(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Clears the QP table and returns the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		rcu_assign_pointer(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port the QP is associated with
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

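/*
 * Drop the MR references held by a QP's receive side and, when
 * @clr_sends is set, those held by pending send work requests
 * (including address handle references for UD-type QPs).  For RC QPs,
 * MRs referenced from the responder's ACK queue are released as well.
 */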
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set the
	 * path_mtu greater than the active MTU (or even max_cap, if we
	 * have tuned that to a small MTU).  We'll set qp->path_mtu to
	 * the lesser of the requested attribute MTU and the active MTU,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

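/**
 * qib_query_qp - query the attributes of a queue pair
 * @ibqp: the queue pair to query
 * @attr: filled in with the current QP attributes
 * @attr_mask: mask of attributes to query (ignored; everything is returned)
 * @init_attr: filled in with the attributes the QP was created with
 *
 * Always returns 0.
 */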
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark the QPN available again */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

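/*
 * Allocate an iterator over the device's QP table (used by the debugfs
 * code) and position it at the first QP.  Returns NULL if the
 * allocation fails or no QPs are present.
 */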
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

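/*
 * Advance the iterator to the next QP in the hash table.  Returns 0 if
 * a QP was found and 1 when the end of the table has been reached.
 * The table entries are read with rcu_dereference(), so the caller is
 * expected to hold rcu_read_lock() across the walk.
 */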
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

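/* Print a one-line summary of the QP referenced by @iter to the seq_file. */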
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif