/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

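/*
 * Write len bytes at offset addr (in 32-byte units) of adapter memory,
 * using inline ULP_TX MEM_WRITE work requests carrying at most
 * C4IW_MAX_INLINE_SIZE (96) bytes of payload each.  A NULL data
 * pointer zeroes the range instead.  Only the last work request asks
 * for a completion, and we block until the firmware reply arrives.
 */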
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {
		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
						    FW_WR_COMPL(1));
			req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
		}
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 *
 * If *stag is T4_STAG_UNSET (and this is not a reset), a new stag
 * index is pulled from the TPT fifo and combined with an incrementing
 * 8-bit key: *stag = (stag_idx << 8) | key.  Each TPT entry is 32
 * bytes, so entry stag_idx lives at (stag.start >> 5) + stag_idx in
 * the 32-byte-unit address space used by write_adapter_mem().
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry) {
		memset(&tpt, 0, sizeof(tpt));
	} else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry)
		c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
				  &rdev->resource.tpt_fifo_lock);
	return err;
}

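/*
 * Copy a page list into the PBL region of adapter memory.  pbl_addr
 * is a byte address, while write_adapter_mem() addresses in 32-byte
 * units (hence >> 5); each PBL entry is 8 bytes (hence << 3).
 */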
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

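/*
 * Invalidate the TPT entry for stag by writing zeroes over it;
 * write_tpt_entry() also returns the stag index to the TPT fifo when
 * resetting an entry.
 */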
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

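/* Allocate a memory-window stag (FW_RI_STAG_MW) owned by pdid. */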
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

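/* Tear down a window stag allocated by allocate_window(). */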
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

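/*
 * Allocate a non-shared MR stag (FW_RI_STAG_NSMR) that points at the
 * given PBL; the translation itself is filled in later, e.g. by a
 * fast-register work request.
 */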
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

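/*
 * Mark the MR valid and publish it.  The mmid used to look up the MR
 * from completion events is simply the stag index (stag >> 8).
 */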
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

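/*
 * Write a valid NSMR TPT entry for mhp and publish the resulting
 * stag.  If publishing fails, the TPT entry is cleared again.
 */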
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

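/*
 * Rewrite the TPT entry of an existing MR in place, reusing its stag.
 * npages must fit within the PBL already allocated for the MR.
 */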
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}

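/* Reserve adapter-memory space for a PBL of npages 8-byte entries. */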
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

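/*
 * Flatten an array of (addr, size) physical buffers into a single
 * big-endian page list.  Every buffer except the first must start on
 * a page boundary, and every buffer except the first and last must be
 * a whole number of pages long.  The page shift chosen is the lowest
 * set bit of the OR of all buffer boundaries, i.e. the largest
 * power-of-two page size (shift capped at 27) to which every boundary
 * is naturally aligned.
 */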
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}

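/*
 * Re-register a physical MR: rewrite its TPT entry with a new PD,
 * access rights and/or translation, as selected by mr_rereg_mask.
 * The update is staged in a local copy (mh) so the MR's visible
 * attributes only change once the hardware update has succeeded.
 */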
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows bound to this MR */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

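/*
 * Register a set of physical buffers: build the page list, copy it
 * into an adapter-memory PBL, then write a TPT entry that points at
 * it.
 */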
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

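/*
 * Register a DMA MR mapping the full address space (va 0, len ~0UL)
 * with no PBL; the TPT entry alone describes the mapping.
 */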
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
				   IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

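/*
 * Register a user memory region: pin the user pages with
 * ib_umem_get(), stream their DMA addresses into the PBL one page of
 * entries at a time, then write the TPT entry.
 */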
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

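/*
 * Allocate a memory window: a bare MW stag with no translation until
 * it is bound to a memory region.
 */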
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

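/* Free a memory window: clear its stag and drop it from the mmid table. */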
int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

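/*
 * Allocate an MR for fast registration: reserve a PBL of pbl_depth
 * entries plus an NSMR stag pointing at it.  The translation itself
 * is supplied later by a fast-register work request.
 */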
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

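/*
 * Allocate the page list used by fast-register work requests.  It is
 * allocated DMA-coherent (the mapping is recorded in dma_addr),
 * presumably so the adapter can read the list in place; the u64
 * array lives immediately after the c4iw_fr_page_list header.
 */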
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
				  &dma_addr, GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}

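/* Free a page list allocated by c4iw_alloc_fastreg_pbl(). */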
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
			  c4pl, dma_unmap_addr(c4pl, mapping));
}

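/*
 * Deregister an MR: clear its TPT entry, return its PBL to the pool,
 * remove it from the mmid table and release any pinned user memory.
 */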
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows bound to this MR */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}