xprt.c revision fde95c7554aa77f9a242f32b0b5f8f15395abf52
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate of the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full window of requests is outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
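
/*
 * A worked example of the fixed-point arithmetic above (a sketch, not
 * normative): with RPC_CWNDSHIFT = 8, cwnd and cong are scaled by 256,
 * so each request holding a congestion slot contributes RPC_CWNDSCALE =
 * 256 to cong.  A fresh transport starts at RPC_INITCWND = 256, i.e.
 * one request in flight; one created with, say, 16 slots saturates at
 * RPC_MAXCWND = 16 << 8 = 4096, i.e. sixteen requests in flight.
 */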

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	result = -EINVAL;
	if (try_module_get(THIS_MODULE)) {
		list_add_tail(&transport->list, &xprt_list);
		printk(KERN_INFO "RPC: Registered %s transport module.\n",
			transport->name);
		result = 0;
	}

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			module_put(THIS_MODULE);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		if (task == NULL)
			return 0;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

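/*
 * Drop the transport write lock.  If a close was requested while the
 * lock was held, leave XPRT_LOCKED set and hand the lock straight to
 * the autoclose worker, so the close runs with the lock already held.
 */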
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

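/*
 * Hand the transport write lock to the next queued task, preferring
 * tasks with a retransmission pending over fresh sends.
 */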
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}

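/*
 * As __xprt_lock_write_next, but hand the lock over only if the
 * congestion window still has room for another request.
 */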
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check whether the congestion window
 * has room for another request; a request that already holds a slot in
 * the window always succeeds.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
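 *
 * A worked numeric sketch of the additive increase (with RPC_CWNDSCALE
 * = 256 as defined above): with cwnd = 512, i.e. two requests' worth,
 * a successful reply grows cwnd by (256 * 256 + 256) / 512 = 128, half
 * a request; two further replies take it to 742 and then 830, past the
 * three-request mark.  An -ETIMEDOUT result instead halves cwnd.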
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
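 *
 * The calculated RTO is backed off exponentially: it is doubled once
 * for every timeout the estimator has already recorded against this
 * procedure and once for every retransmission of this request, then
 * clamped to the client's maximum timeout value.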
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

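/*
 * Compute the deadline (in jiffies) after which minor timeouts stop
 * escalating and xprt_adjust_timeout reports a major timeout instead.
 */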
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
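 * Returns 0 after a minor timeout, with rq_timeout escalated
 * exponentially or linearly according to to_exponential; returns
 * -ETIMEDOUT once rq_majortimeo has passed, resetting the retry
 * state so the next series starts over from to_initval.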
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task != NULL)
		rpc_wake_up_task(xprt->snd_task);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

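/*
 * Idle timer callback, armed from xprt_release(): if the transport
 * still has no requests waiting for replies when it fires, schedule
 * an autoclose.
 */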
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
	return;
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %5u xprt_connect_status: server %s refused "
				"connection\n", task->tk_pid,
				task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %5u xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
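 * Samples feed the estimator only for procedures that carry a timer
 * classification, and only when the request was transmitted exactly
 * once (Karn's rule); retransmitted requests merely bump the backoff
 * state.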
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	task->tk_xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	/* Ensure all writes are done before we update req->rq_received */
	smp_wmb();
	req->rq_received = req->rq_private_buf.len = copied;
	rpc_wake_up_task(task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
		task->tk_status = -ETIMEDOUT;
	}
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}

	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %5u xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 *	 hence there is no danger of the waking up task being put on
	 *	 schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;
	if (status == -ECONNREFUSED)
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
}

static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC:       waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}

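/*
 * XIDs come from a simple per-transport counter, seeded randomly at
 * transport creation so that consecutive incarnations do not replay
 * the same XID sequence.
 */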
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	setup_timer(&xprt->timer, xprt_init_autodisconnect,
			(unsigned long)xprt);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC:       destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}
1079