/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

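/*
 * map_xdr() builds an iovec array in 'vec' that describes the complete
 * response in 'xdr' (head, page list, tail), one entry per contiguous
 * region.  Entry 0 is left free for the RPC/RDMA reply header, which is
 * mapped separately in send_reply().
 */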
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	BUG_ON(xdr->len !=
	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

	/* Skip the first sge; it is reserved for the RPC/RDMA reply header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

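/*
 * dma_map_xdr() DMA-maps the page that backs byte offset 'xdr_off' of the
 * xdr_buf, whether that offset falls in the head, the page list, or the
 * tail.  At most PAGE_SIZE bytes are mapped in a single call.
 */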
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

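/*
 * send_write() posts one RDMA_WRITE work request that pushes up to
 * write_len bytes of the response XDR (starting at xdr_off) into the
 * client memory region identified by <rmr, to>.  Returns the number of
 * bytes covered by the posted WR (which may be less than write_len when
 * sc_max_sge is exhausted), or -EIO on a fatal error.
 */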
/* Assumptions:
 * - The specified write_len fits within sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	BUG_ON(vec->count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}
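	/* bc now holds the byte offset of xdr_off within vec->sge[xdr_sge_no] */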

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		BUG_ON(xdr_sge_no > vec->count);
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

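/*
 * send_write_chunks() walks the Write list supplied in the client's
 * request and RDMA-writes the pagelist and tail of rq_res into those
 * client-provided segments, then encodes the write list actually used
 * into the reply header.  Returns the number of bytes that no longer
 * need to be sent inline (zero when the request carried no Write list),
 * or a negative errno on failure.
 */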
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < ntohl(arg_ary->wc_nchunks);
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 ntohl(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

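/*
 * send_reply_chunks() RDMA-writes the entire rq_res XDR buffer into the
 * client-provided Reply chunk segments and encodes the resulting reply
 * array in the reply header.  Returns the number of bytes written via
 * RDMA (zero when the request carried no Reply chunk), or a negative
 * errno on failure.
 */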
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with a read list and/or
	 * a write list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = ntohl(arg_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 ntohl(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. It is called after any data moved via RDMA
 * WRITE has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]. The caller has already encoded the reply type (RDMA_MSG or
 * RDMA_NOMSG) into rdma_resp, and the 'byte_count' argument indicates
 * how much of the XDR to include in this RDMA_SEND. NB: The offset of
 * the payload to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count'.  The offset into
	 * rq_res must accumulate across SGEs so that each SGE maps the
	 * next piece of the response rather than the head again.
	 */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	BUG_ON(byte_count != 0);

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

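/*
 * The svc_xprt class requires an xpo_prep_reply_hdr method, but svcrdma
 * builds its reply header itself in svc_rdma_sendto(), so there is
 * nothing to reserve here.
 */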
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the original start of the receive buffer, i.e. the RPC/RDMA
 * transport header that precedes the RPC message held in the xdr_buf.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

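/*
 * svc_rdma_sendto() is the xpo_sendto entry point for svcrdma.  It maps
 * rq_res, builds the RPC/RDMA reply header, pushes any write-chunk and
 * reply-chunk data to the client with RDMA_WRITE, and then transmits
 * the remaining inline bytes (and the header) with a single RDMA_SEND.
 */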
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a request vector for the reply XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}