backchannel_rqst.c revision 9e00abc3c20904fd6a5d888bb7023925799ec8a5
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:       free allocations for req=%p\n", req);
	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_private_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	list_del(&req->rq_bc_pa_list);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs (a 4096-byte page holds roughly 256 of them, less XDR
 * overhead).  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 *
 * An illustrative usage sketch follows the function below.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req=%p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
		xprt_free_allocation(req);

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL(xprt_setup_backchannel);
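
/*
 * Illustrative only: a minimal sketch, not part of the original file, of
 * how a caller might pair xprt_setup_backchannel() with
 * xprt_destroy_backchannel().  The names here (demo_session_setup,
 * bc_slots) are hypothetical; in practice the NFSv4.1 session-creation
 * code supplies the negotiated backchannel slot count.
 *
 *	static int demo_session_setup(struct rpc_xprt *xprt,
 *				      unsigned int bc_slots)
 *	{
 *		int err;
 *
 *		err = xprt_setup_backchannel(xprt, bc_slots);
 *		if (err < 0)
 *			return err;	// -ENOMEM on allocation failure
 *
 *		// ... use the session; callbacks arrive on the backchannel ...
 *
 *		// On teardown, release the same number of slots:
 *		xprt_destroy_backchannel(xprt, bc_slots);
 *		return 0;
 *	}
 */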

/*
 * Destroys the backchannel preallocated structures.
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:       destroy backchannel transport\n");

	BUG_ON(max_reqs == 0);
	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:       req=%p\n", req);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       backchannel list empty=%s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL(xprt_destroy_backchannel);

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these preallocated requests, and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so grab the plain
 * spin_lock since there is no need to grab the bottom half spin_lock.
 *
 * Return an available rpc_rqst, otherwise NULL if none are available.
 *
 * An illustrative usage sketch follows the function below.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	dprintk("RPC:       allocate a backchannel request\n");
	spin_lock(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock(&xprt->bc_pa_lock);

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_reply_bytes_recvd = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	}
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}
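
/*
 * Illustrative only: a minimal sketch, not part of the original file, of
 * how a transport receive path might consume a preallocated request in
 * soft interrupt context (which is why the plain spin_lock() above is
 * enough).  The handler name demo_recv_bc_call and the processing step
 * are hypothetical; the real transport code does the equivalent when it
 * recognizes an RPC call arriving on the backchannel.
 *
 *	static void demo_recv_bc_call(struct rpc_xprt *xprt)
 *	{
 *		struct rpc_rqst *req;
 *
 *		req = xprt_alloc_bc_request(xprt);
 *		if (req == NULL)
 *			return;	// no preallocated slot: drop the callback
 *
 *		// ... decode the call into req->rq_rcv_buf and hand req
 *		// to the callback service for processing; that code later
 *		// returns the slot with xprt_free_bc_request(req) ...
 *	}
 */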

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add back to the list because there is no need to
		 * have any more preallocated entries.  See the timeline
		 * sketch after this function.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}
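
/*
 * Illustrative only: a schematic timeline, not part of the original file,
 * showing the ordering that the xprt_need_to_requeue() check above handles:
 *
 *	t0: xprt_alloc_bc_request()    req removed from bc_pa_list
 *	t1: xprt_destroy_backchannel() bc_alloc_count drops to 0 and the
 *	                               entries still on the list are freed
 *	t2: xprt_free_bc_request(req)  bc_alloc_count is 0, so req is freed
 *	                               here instead of being requeued
 */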