/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"

#define NO_SUPPORT -1

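/*
 * Build a T3 SEND work request (optionally solicited and/or with remote
 * STag invalidation) from an ib_send_wr, copying the SGL into the WQE and
 * returning the WQE length in flits.
 */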
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	plen = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;

		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

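/*
 * Build a T3 RDMA WRITE work request.  For IB_WR_RDMA_WRITE_WITH_IMM the
 * 4-byte immediate is carried in sgl[0].stag with a zero-length SGL.
 */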
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
				 u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = cpu_to_be32(0);
		wqe->write.num_sgle = cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen)
				return -EMSGSIZE;

			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

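/*
 * Build a T3 READ REQUEST work request.  Only a single local SGE is
 * supported; IB_WR_RDMA_READ_WITH_INV sets the local-invalidate bit.
 */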
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

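/*
 * Build a T3 fast-register work request.  If the page list does not fit in
 * a single WQE, a second PBL-fragment WQE is consumed and *wr_cnt is set to
 * 2 so the caller advances the write pointer accordingly.
 */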
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
				cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
		V_FR_TYPE(TPT_VATO) |
		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr+1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
			       Q_GENBIT(wq->wptr + 1, wq->size_log2),
			       0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
			       T3_EOP);

			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}

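/* Build a local STag invalidate work request. */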
static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}

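/*
 * Validate each SGE against its memory region (state, ZBVA, bounds) and
 * translate the SGE address into an adapter PBL index and page size.
 */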
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;

	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}

		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			        rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

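/*
 * Build a T3 receive work request from SGEs that reference registered MRs,
 * using the PBL set up at registration time.  Unused SGL slots are zeroed
 * and the wr_id is stashed in the software RQ entry.
 */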
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}

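/*
 * Build a T3 receive work request for zero-STag (lkey == 0) SGEs.  On-adapter
 * PBL memory is allocated here and handed to the uP, which builds the PBL
 * for the HW receive descriptor.
 */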
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry.  So we allocate the max needed PBL memory here and pass
	 * it to the uP in the recv WR.  The uP will build the PBL and setup
	 * the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size. This and an imposed 128MB
		 * sge length limit allow us to require only a 2-entry HW
		 * PBL for each SGE.  This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}

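/*
 * Post send work requests: build a T3 WQE for each WR, record it in the
 * software SQ, and ring the doorbell once the list has been consumed.
 */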
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
		  qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
						 &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err)
			break;
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

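/*
 * Post receive work requests: build either a normal or zero-STag recv WQE
 * per WR, update the RQ pointers, and ring the doorbell.
 */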
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (wr->num_sge > T3_MAX_SGE) {
			err = -EINVAL;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs) {
			if (wr->sg_list[0].lkey)
				err = build_rdma_recv(qhp, wqe, wr);
			else
				err = build_zero_stag_recv(qhp, wqe, wr);
		} else {
			err = -ENOMEM;
		}

		if (err)
			break;

		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
		     "wqe %p\n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

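/*
 * Post a memory-window bind work request on the SQ and ring the doorbell.
 */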
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->bind_info.addr;
	sgl.lkey = mw_bind->bind_info.mr->lkey;
	sgl.length = mw_bind->bind_info.length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(
		mw_bind->bind_info.mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

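/*
 * Map a CQE error status to the RDMAP/DDP/MPA layer, error type, and error
 * code carried in a TERMINATE message.  With no rsp_msg, report a local
 * catastrophic RDMAP error.
 */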
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
		           (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

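/*
 * Post a zero-byte read request directly to the adapter as an offload
 * packet, bypassing the send queue.
 */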
int iwch_post_zb_read(struct iwch_ep *ep)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
				struct iwch_cq *schp)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock(&qhp->lock);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock(&rchp->lock);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock(&rchp->lock);
	if (flushed) {
		spin_lock(&rchp->comp_handler_lock);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock(&rchp->comp_handler_lock);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock(&schp->lock);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock(&schp->lock);
	if (flushed) {
		spin_lock(&schp->comp_handler_lock);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock(&schp->comp_handler_lock);
	}

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock(&qhp->lock);
}

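/*
 * Flush the QP: for user-mode QPs just mark the WQ and CQs in error and
 * notify the consumers; for kernel QPs flush the CQEs via __flush_qp().
 */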
static void flush_qp(struct iwch_qp *qhp)
{
	struct iwch_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		cxio_set_wq_in_error(&qhp->wq);
		cxio_set_cq_in_error(&rchp->cq);
		spin_lock(&rchp->comp_handler_lock);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock(&rchp->comp_handler_lock);
		if (schp != rchp) {
			cxio_set_cq_in_error(&schp->cq);
			spin_lock(&schp->comp_handler_lock);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock(&schp->comp_handler_lock);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;

	while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}
	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}

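/*
 * Build the RDMA_INIT attributes from the QP and endpoint state and pass
 * them to the low-level driver to initialize the firmware QP context.
 */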
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;

	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

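/*
 * QP state machine: apply attribute changes in IDLE and drive the
 * IDLE/RTS/CLOSING/TERMINATE/ERROR transitions, initiating connection
 * teardown, TERMINATE posting, and QP flush as required.
 */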
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}

		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);
		put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}