/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:        free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_private_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs (a 4096-byte page holds up to 256 of them, ignoring XDR
 * framing overhead).  The NFS client indicates to the pNFS server that
 * its callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
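
/*
 * Example (illustrative only; the function and helper names below are
 * hypothetical, not part of this file or of the NFS client): a session
 * owner might pair backchannel preallocation with session creation
 * along these lines:
 *
 *	static int example_session_setup(struct rpc_xprt *xprt,
 *					 unsigned int bc_slots)
 *	{
 *		int err;
 *
 *		err = xprt_setup_backchannel(xprt, bc_slots);
 *		if (err)
 *			return err;	// only -ENOMEM is returned here
 *
 *		err = example_negotiate_session(xprt, bc_slots);
 *		if (err)
 *			xprt_destroy_backchannel(xprt, bc_slots);
 *		return err;
 *	}
 */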

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:        destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC:        backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
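
/*
 * Example (illustrative only; the slot count variable is hypothetical):
 * teardown mirrors setup, releasing the same number of slots that the
 * corresponding xprt_setup_backchannel call added:
 *
 *	xprt_destroy_backchannel(xprt, example_session_bc_slots);
 *
 * Destroying fewer slots than remain on bc_pa_list is safe; the loop
 * above stops after max_reqs entries.
 */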

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC:       allocate a backchannel request\n");
	if (list_empty(&xprt->bc_pa_list))
		goto not_found;

	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to claim
 * one of these structures for a request, and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context; take the plain
 * spin_lock since there is no need for the bottom-half-disabling
 * spin_lock_bh variant here.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
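
/*
 * Example (illustrative sketch, not taken verbatim from any transport):
 * a connection-oriented transport's receive path might use the lookup
 * and completion helpers roughly as follows when it decodes an XID that
 * belongs to a callback.  The data-copy helper and error handling below
 * are hypothetical stand-ins:
 *
 *	static int example_read_callback(struct rpc_xprt *xprt, __be32 xid)
 *	{
 *		struct rpc_rqst *req;
 *		uint32_t copied;
 *
 *		req = xprt_lookup_bc_request(xprt, xid);
 *		if (req == NULL) {
 *			// no preallocated slot: callback table overflow
 *			xprt_force_disconnect(xprt);
 *			return -1;
 *		}
 *
 *		copied = example_copy_call_data(req);
 *		xprt_complete_bc_request(req, copied);
 *		return 0;
 *	}
 */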

/*
 * Add a callback request to the callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	/*
	 * rq_bc_pa_list is protected by bc_pa_lock, not sv_cb_lock;
	 * unlink the request under the correct lock so we don't race
	 * with xprt_lookup_bc_request and xprt_free_bc_request.
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC:       add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
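
/*
 * Example (illustrative sketch; the thread function and stop condition
 * are hypothetical, loosely modeled on how an NFSv4.1 callback service
 * consumes this list): a dedicated thread dequeues requests that
 * xprt_complete_bc_request queued on sv_cb_list and hands each one to
 * bc_svc_process for dispatch:
 *
 *	static int example_callback_svc(struct svc_serv *serv,
 *					struct svc_rqst *rqstp)
 *	{
 *		struct rpc_rqst *req;
 *		DEFINE_WAIT(wq);
 *
 *		while (!example_should_stop()) {
 *			prepare_to_wait(&serv->sv_cb_waitq, &wq,
 *					TASK_INTERRUPTIBLE);
 *			spin_lock_bh(&serv->sv_cb_lock);
 *			if (!list_empty(&serv->sv_cb_list)) {
 *				req = list_first_entry(&serv->sv_cb_list,
 *						struct rpc_rqst, rq_bc_list);
 *				list_del(&req->rq_bc_list);
 *				spin_unlock_bh(&serv->sv_cb_lock);
 *				finish_wait(&serv->sv_cb_waitq, &wq);
 *				bc_svc_process(serv, req, rqstp);
 *			} else {
 *				spin_unlock_bh(&serv->sv_cb_lock);
 *				schedule();
 *				finish_wait(&serv->sv_cb_waitq, &wq);
 *			}
 *		}
 *		return 0;
 *	}
 */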