clnt.c revision c36dcfe1f7712b7c12df2d80359e638b9d246ce6
1/*
2 *  linux/net/sunrpc/clnt.c
3 *
4 *  This file contains the high-level RPC interface.
5 *  It is modeled as a finite state machine to support both synchronous
6 *  and asynchronous requests.
7 *
8 *  -	RPC header generation and argument serialization.
9 *  -	Credential refresh.
10 *  -	TCP connect handling.
11 *  -	Retry of operation when it is suspected the operation failed because
12 *	of uid squashing on the server, or when the credentials were stale
13 *	and need to be refreshed, or when a packet was damaged in transit.
14 *	This may have to be moved to the VFS layer.
15 *
16 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18 */
19
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kallsyms.h>
24#include <linux/mm.h>
25#include <linux/namei.h>
26#include <linux/mount.h>
27#include <linux/slab.h>
28#include <linux/utsname.h>
29#include <linux/workqueue.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/un.h>
33#include <linux/rcupdate.h>
34
35#include <linux/sunrpc/clnt.h>
36#include <linux/sunrpc/addr.h>
37#include <linux/sunrpc/rpc_pipe_fs.h>
38#include <linux/sunrpc/metrics.h>
39#include <linux/sunrpc/bc_xprt.h>
40#include <trace/events/sunrpc.h>
41
42#include "sunrpc.h"
43#include "netns.h"
44
45#ifdef RPC_DEBUG
46# define RPCDBG_FACILITY	RPCDBG_CALL
47#endif
48
49#define dprint_status(t)					\
50	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
51			__func__, t->tk_status)
52
53/*
54 * All RPC clients are linked into per-network-namespace lists; the
 * destroy_wait queue below lets rpc_shutdown_client() wait for their
 * outstanding tasks to complete.
55 */
56
57static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
58
59
60static void	call_start(struct rpc_task *task);
61static void	call_reserve(struct rpc_task *task);
62static void	call_reserveresult(struct rpc_task *task);
63static void	call_allocate(struct rpc_task *task);
64static void	call_decode(struct rpc_task *task);
65static void	call_bind(struct rpc_task *task);
66static void	call_bind_status(struct rpc_task *task);
67static void	call_transmit(struct rpc_task *task);
68#if defined(CONFIG_SUNRPC_BACKCHANNEL)
69static void	call_bc_transmit(struct rpc_task *task);
70#endif /* CONFIG_SUNRPC_BACKCHANNEL */
71static void	call_status(struct rpc_task *task);
72static void	call_transmit_status(struct rpc_task *task);
73static void	call_refresh(struct rpc_task *task);
74static void	call_refreshresult(struct rpc_task *task);
75static void	call_timeout(struct rpc_task *task);
76static void	call_connect(struct rpc_task *task);
77static void	call_connect_status(struct rpc_task *task);
78
79static __be32	*rpc_encode_header(struct rpc_task *task);
80static __be32	*rpc_verify_header(struct rpc_task *task);
81static int	rpc_ping(struct rpc_clnt *clnt);
82
83static void rpc_register_client(struct rpc_clnt *clnt)
84{
85	struct net *net = rpc_net_ns(clnt);
86	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
87
88	spin_lock(&sn->rpc_client_lock);
89	list_add(&clnt->cl_clients, &sn->all_clients);
90	spin_unlock(&sn->rpc_client_lock);
91}
92
93static void rpc_unregister_client(struct rpc_clnt *clnt)
94{
95	struct net *net = rpc_net_ns(clnt);
96	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
97
98	spin_lock(&sn->rpc_client_lock);
99	list_del(&clnt->cl_clients);
100	spin_unlock(&sn->rpc_client_lock);
101}
102
103static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
104{
105	rpc_remove_client_dir(clnt);
106}
107
108static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
109{
110	struct net *net = rpc_net_ns(clnt);
111	struct super_block *pipefs_sb;
112
113	pipefs_sb = rpc_get_sb_net(net);
114	if (pipefs_sb) {
115		__rpc_clnt_remove_pipedir(clnt);
116		rpc_put_sb_net(net);
117	}
118}
119
120static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121				    struct rpc_clnt *clnt)
122{
123	static uint32_t clntid;
124	const char *dir_name = clnt->cl_program->pipe_dir_name;
125	char name[15];
126	struct dentry *dir, *dentry;
127
128	dir = rpc_d_lookup_sb(sb, dir_name);
129	if (dir == NULL) {
130		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
131		return dir;
132	}
133	for (;;) {
134		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
135		name[sizeof(name) - 1] = '\0';
136		dentry = rpc_create_client_dir(dir, name, clnt);
137		if (!IS_ERR(dentry))
138			break;
139		if (dentry == ERR_PTR(-EEXIST))
140			continue;
141		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
142				" %s/%s, error %ld\n",
143				dir_name, name, PTR_ERR(dentry));
144		break;
145	}
146	dput(dir);
147	return dentry;
148}
149
150static int
151rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
152{
153	struct dentry *dentry;
154
155	if (clnt->cl_program->pipe_dir_name != NULL) {
156		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
157		if (IS_ERR(dentry))
158			return PTR_ERR(dentry);
159	}
160	return 0;
161}
162
163static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
164{
165	if (clnt->cl_program->pipe_dir_name == NULL)
166		return 1;
167
168	switch (event) {
169	case RPC_PIPEFS_MOUNT:
170		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
171			return 1;
172		if (atomic_read(&clnt->cl_count) == 0)
173			return 1;
174		break;
175	case RPC_PIPEFS_UMOUNT:
176		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
177			return 1;
178		break;
179	}
180	return 0;
181}
182
183static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
184				   struct super_block *sb)
185{
186	struct dentry *dentry;
187	int err = 0;
188
189	switch (event) {
190	case RPC_PIPEFS_MOUNT:
191		dentry = rpc_setup_pipedir_sb(sb, clnt);
192		if (!dentry)
193			return -ENOENT;
194		if (IS_ERR(dentry))
195			return PTR_ERR(dentry);
196		break;
197	case RPC_PIPEFS_UMOUNT:
198		__rpc_clnt_remove_pipedir(clnt);
199		break;
200	default:
201		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
202		return -ENOTSUPP;
203	}
204	return err;
205}
206
207static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
208				struct super_block *sb)
209{
210	int error = 0;
211
212	for (;; clnt = clnt->cl_parent) {
213		if (!rpc_clnt_skip_event(clnt, event))
214			error = __rpc_clnt_handle_event(clnt, event, sb);
215		if (error || clnt == clnt->cl_parent)
216			break;
217	}
218	return error;
219}
220
221static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
222{
223	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
224	struct rpc_clnt *clnt;
225
226	spin_lock(&sn->rpc_client_lock);
227	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
228		if (rpc_clnt_skip_event(clnt, event))
229			continue;
230		spin_unlock(&sn->rpc_client_lock);
231		return clnt;
232	}
233	spin_unlock(&sn->rpc_client_lock);
234	return NULL;
235}
236
237static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
238			    void *ptr)
239{
240	struct super_block *sb = ptr;
241	struct rpc_clnt *clnt;
242	int error = 0;
243
244	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
245		error = __rpc_pipefs_event(clnt, event, sb);
246		if (error)
247			break;
248	}
249	return error;
250}
251
252static struct notifier_block rpc_clients_block = {
253	.notifier_call	= rpc_pipefs_event,
254	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
255};
256
257int rpc_clients_notifier_register(void)
258{
259	return rpc_pipefs_notifier_register(&rpc_clients_block);
260}
261
262void rpc_clients_notifier_unregister(void)
263{
264	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
265}
266
267static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
268{
269	clnt->cl_nodelen = strlen(nodename);
270	if (clnt->cl_nodelen > UNX_MAXNODENAME)
271		clnt->cl_nodelen = UNX_MAXNODENAME;
272	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
273}
274
275static int rpc_client_register(const struct rpc_create_args *args,
276			       struct rpc_clnt *clnt)
277{
278	struct rpc_auth_create_args auth_args = {
279		.pseudoflavor = args->authflavor,
280		.target_name = args->client_name,
281	};
282	struct rpc_auth *auth;
283	struct net *net = rpc_net_ns(clnt);
284	struct super_block *pipefs_sb;
285	int err;
286
287	pipefs_sb = rpc_get_sb_net(net);
288	if (pipefs_sb) {
289		err = rpc_setup_pipedir(pipefs_sb, clnt);
290		if (err)
291			goto out;
292	}
293
294	rpc_register_client(clnt);
295	if (pipefs_sb)
296		rpc_put_sb_net(net);
297
298	auth = rpcauth_create(&auth_args, clnt);
299	if (IS_ERR(auth)) {
300		dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
301				args->authflavor);
302		err = PTR_ERR(auth);
303		goto err_auth;
304	}
305	return 0;
306err_auth:
307	pipefs_sb = rpc_get_sb_net(net);
308	rpc_unregister_client(clnt);
309	__rpc_clnt_remove_pipedir(clnt);
310out:
311	if (pipefs_sb)
312		rpc_put_sb_net(net);
313	return err;
314}
315
316static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
317{
318	const struct rpc_program *program = args->program;
319	const struct rpc_version *version;
320	struct rpc_clnt		*clnt = NULL;
321	int err;
322
323	/* sanity check the name before trying to print it */
324	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
325			program->name, args->servername, xprt);
326
327	err = rpciod_up();
328	if (err)
329		goto out_no_rpciod;
330
331	err = -EINVAL;
332	if (args->version >= program->nrvers)
333		goto out_err;
334	version = program->version[args->version];
335	if (version == NULL)
336		goto out_err;
337
338	err = -ENOMEM;
339	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
340	if (!clnt)
341		goto out_err;
342	clnt->cl_parent = clnt;
343
344	rcu_assign_pointer(clnt->cl_xprt, xprt);
345	clnt->cl_procinfo = version->procs;
346	clnt->cl_maxproc  = version->nrprocs;
347	clnt->cl_prog     = args->prognumber ? : program->number;
348	clnt->cl_vers     = version->number;
349	clnt->cl_stats    = program->stats;
350	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
351	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
352	err = -ENOMEM;
353	if (clnt->cl_metrics == NULL)
354		goto out_no_stats;
355	clnt->cl_program  = program;
356	INIT_LIST_HEAD(&clnt->cl_tasks);
357	spin_lock_init(&clnt->cl_lock);
358
359	if (!xprt_bound(xprt))
360		clnt->cl_autobind = 1;
361
362	clnt->cl_timeout = xprt->timeout;
363	if (args->timeout != NULL) {
364		memcpy(&clnt->cl_timeout_default, args->timeout,
365				sizeof(clnt->cl_timeout_default));
366		clnt->cl_timeout = &clnt->cl_timeout_default;
367	}
368
369	clnt->cl_rtt = &clnt->cl_rtt_default;
370	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
371
372	atomic_set(&clnt->cl_count, 1);
373
374	/* save the nodename */
375	rpc_clnt_set_nodename(clnt, utsname()->nodename);
376
377	err = rpc_client_register(args, clnt);
378	if (err)
379		goto out_no_path;
380	return clnt;
381
382out_no_path:
383	rpc_free_iostats(clnt->cl_metrics);
384out_no_stats:
385	kfree(clnt);
386out_err:
387	rpciod_down();
388out_no_rpciod:
389	xprt_put(xprt);
390	return ERR_PTR(err);
391}
392
393/**
394 * rpc_create - create an RPC client and transport with one call
395 * @args: rpc_clnt create argument structure
396 *
397 * Creates and initializes an RPC transport and an RPC client.
398 *
399 * It can ping the server in order to determine if it is up, and to see if
400 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
401 * this behavior so asynchronous tasks can also use rpc_create.
402 */
403struct rpc_clnt *rpc_create(struct rpc_create_args *args)
404{
405	struct rpc_xprt *xprt;
406	struct rpc_clnt *clnt;
407	struct xprt_create xprtargs = {
408		.net = args->net,
409		.ident = args->protocol,
410		.srcaddr = args->saddress,
411		.dstaddr = args->address,
412		.addrlen = args->addrsize,
413		.servername = args->servername,
414		.bc_xprt = args->bc_xprt,
415	};
416	char servername[48];
417
418	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
419		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
420	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
421		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
422	/*
423	 * If the caller chooses not to specify a hostname, whip
424	 * up a string representation of the passed-in address.
425	 */
426	if (xprtargs.servername == NULL) {
427		struct sockaddr_un *sun =
428				(struct sockaddr_un *)args->address;
429		struct sockaddr_in *sin =
430				(struct sockaddr_in *)args->address;
431		struct sockaddr_in6 *sin6 =
432				(struct sockaddr_in6 *)args->address;
433
434		servername[0] = '\0';
435		switch (args->address->sa_family) {
436		case AF_LOCAL:
437			snprintf(servername, sizeof(servername), "%s",
438				 sun->sun_path);
439			break;
440		case AF_INET:
441			snprintf(servername, sizeof(servername), "%pI4",
442				 &sin->sin_addr.s_addr);
443			break;
444		case AF_INET6:
445			snprintf(servername, sizeof(servername), "%pI6",
446				 &sin6->sin6_addr);
447			break;
448		default:
449			/* caller wants default server name, but
450			 * address family isn't recognized. */
451			return ERR_PTR(-EINVAL);
452		}
453		xprtargs.servername = servername;
454	}
455
456	xprt = xprt_create_transport(&xprtargs);
457	if (IS_ERR(xprt))
458		return (struct rpc_clnt *)xprt;
459
460	/*
461	 * By default, kernel RPC client connects from a reserved port.
462	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
463	 * but it is always enabled for rpciod, which handles the connect
464	 * operation.
465	 */
466	xprt->resvport = 1;
467	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
468		xprt->resvport = 0;
469
470	clnt = rpc_new_client(args, xprt);
471	if (IS_ERR(clnt))
472		return clnt;
473
474	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
475		int err = rpc_ping(clnt);
476		if (err != 0) {
477			rpc_shutdown_client(clnt);
478			return ERR_PTR(err);
479		}
480	}
481
482	clnt->cl_softrtry = 1;
483	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
484		clnt->cl_softrtry = 0;
485
486	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
487		clnt->cl_autobind = 1;
488	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
489		clnt->cl_discrtry = 1;
490	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
491		clnt->cl_chatty = 1;
492
493	return clnt;
494}
495EXPORT_SYMBOL_GPL(rpc_create);
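
/*
 * Usage sketch (illustrative only): a typical rpc_create() caller fills in
 * struct rpc_create_args and checks the result with IS_ERR().  The program
 * table, version, address and server name below are placeholders;
 * XPRT_TRANSPORT_TCP and RPC_AUTH_UNIX are assumed to come from the usual
 * sunrpc headers.
 *
 *	struct rpc_create_args args = {
 *		.net		= net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&srvaddr,
 *		.addrsize	= sizeof(srvaddr),
 *		.servername	= "server.example.org",
 *		.program	= &example_rpc_program,
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt;
 *
 *	clnt = rpc_create(&args);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */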
496
497/*
498 * This function clones the RPC client structure. It allows us to share the
499 * same transport while varying parameters such as the authentication
500 * flavour.
501 */
502static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
503					   struct rpc_clnt *clnt)
504{
505	struct rpc_xprt *xprt;
506	struct rpc_clnt *new;
507	int err;
508
509	err = -ENOMEM;
510	rcu_read_lock();
511	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
512	rcu_read_unlock();
513	if (xprt == NULL)
514		goto out_err;
515	args->servername = xprt->servername;
516
517	new = rpc_new_client(args, xprt);
518	if (IS_ERR(new)) {
519		err = PTR_ERR(new);
520		goto out_err;
521	}
522
523	atomic_inc(&clnt->cl_count);
524	new->cl_parent = clnt;
525
526	/* Turn off autobind on clones */
527	new->cl_autobind = 0;
528	new->cl_softrtry = clnt->cl_softrtry;
529	new->cl_discrtry = clnt->cl_discrtry;
530	new->cl_chatty = clnt->cl_chatty;
531	return new;
532
533out_err:
534	dprintk("RPC:       %s: returned error %d\n", __func__, err);
535	return ERR_PTR(err);
536}
537
538/**
539 * rpc_clone_client - Clone an RPC client structure
540 *
541 * @clnt: RPC client whose parameters are copied
542 *
543 * Returns a fresh RPC client or an ERR_PTR.
544 */
545struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
546{
547	struct rpc_create_args args = {
548		.program	= clnt->cl_program,
549		.prognumber	= clnt->cl_prog,
550		.version	= clnt->cl_vers,
551		.authflavor	= clnt->cl_auth->au_flavor,
552	};
553	return __rpc_clone_client(&args, clnt);
554}
555EXPORT_SYMBOL_GPL(rpc_clone_client);
556
557/**
558 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
559 *
560 * @clnt: RPC client whose parameters are copied
561 * @flavor: security flavor for new client
562 *
563 * Returns a fresh RPC client or an ERR_PTR.
564 */
565struct rpc_clnt *
566rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
567{
568	struct rpc_create_args args = {
569		.program	= clnt->cl_program,
570		.prognumber	= clnt->cl_prog,
571		.version	= clnt->cl_vers,
572		.authflavor	= flavor,
573	};
574	return __rpc_clone_client(&args, clnt);
575}
576EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
577
578/*
579 * Kill all tasks for the given client.
580 * XXX: kill their descendants as well?
581 */
582void rpc_killall_tasks(struct rpc_clnt *clnt)
583{
584	struct rpc_task	*rovr;
585
586
587	if (list_empty(&clnt->cl_tasks))
588		return;
589	dprintk("RPC:       killing all tasks for client %p\n", clnt);
590	/*
591	 * Spin lock all_tasks to prevent changes...
592	 */
593	spin_lock(&clnt->cl_lock);
594	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
595		if (!RPC_IS_ACTIVATED(rovr))
596			continue;
597		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
598			rovr->tk_flags |= RPC_TASK_KILLED;
599			rpc_exit(rovr, -EIO);
600			if (RPC_IS_QUEUED(rovr))
601				rpc_wake_up_queued_task(rovr->tk_waitqueue,
602							rovr);
603		}
604	}
605	spin_unlock(&clnt->cl_lock);
606}
607EXPORT_SYMBOL_GPL(rpc_killall_tasks);
608
609/*
610 * Properly shut down an RPC client, terminating all outstanding
611 * requests.
612 */
613void rpc_shutdown_client(struct rpc_clnt *clnt)
614{
615	might_sleep();
616
617	dprintk_rcu("RPC:       shutting down %s client for %s\n",
618			clnt->cl_program->name,
619			rcu_dereference(clnt->cl_xprt)->servername);
620
621	while (!list_empty(&clnt->cl_tasks)) {
622		rpc_killall_tasks(clnt);
623		wait_event_timeout(destroy_wait,
624			list_empty(&clnt->cl_tasks), 1*HZ);
625	}
626
627	rpc_release_client(clnt);
628}
629EXPORT_SYMBOL_GPL(rpc_shutdown_client);
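
/*
 * Usage sketch (illustrative only): a client obtained from rpc_create() is
 * normally torn down with rpc_shutdown_client() once the caller knows no
 * further calls will be issued.
 *
 *	clnt = rpc_create(&args);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	... issue RPC calls ...
 *	rpc_shutdown_client(clnt);
 */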
630
631/*
632 * Free an RPC client
633 */
634static void
635rpc_free_client(struct rpc_clnt *clnt)
636{
637	dprintk_rcu("RPC:       destroying %s client for %s\n",
638			clnt->cl_program->name,
639			rcu_dereference(clnt->cl_xprt)->servername);
640	if (clnt->cl_parent != clnt)
641		rpc_release_client(clnt->cl_parent);
642	rpc_clnt_remove_pipedir(clnt);
643	rpc_unregister_client(clnt);
644	rpc_free_iostats(clnt->cl_metrics);
645	clnt->cl_metrics = NULL;
646	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
647	rpciod_down();
648	kfree(clnt);
649}
650
651/*
652 * Release the RPC client's authentication handle before freeing the client
653 */
654static void
655rpc_free_auth(struct rpc_clnt *clnt)
656{
657	if (clnt->cl_auth == NULL) {
658		rpc_free_client(clnt);
659		return;
660	}
661
662	/*
663	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
664	 *       release remaining GSS contexts. This mechanism ensures
665	 *       that it can do so safely.
666	 */
667	atomic_inc(&clnt->cl_count);
668	rpcauth_release(clnt->cl_auth);
669	clnt->cl_auth = NULL;
670	if (atomic_dec_and_test(&clnt->cl_count))
671		rpc_free_client(clnt);
672}
673
674/*
675 * Release reference to the RPC client
676 */
677void
678rpc_release_client(struct rpc_clnt *clnt)
679{
680	dprintk("RPC:       rpc_release_client(%p)\n", clnt);
681
682	if (list_empty(&clnt->cl_tasks))
683		wake_up(&destroy_wait);
684	if (atomic_dec_and_test(&clnt->cl_count))
685		rpc_free_auth(clnt);
686}
687EXPORT_SYMBOL_GPL(rpc_release_client);
688
689/**
690 * rpc_bind_new_program - bind a new RPC program to an existing client
691 * @old: old rpc_client
692 * @program: rpc program to set
693 * @vers: rpc program version
694 *
695 * Clones the rpc client and sets up a new RPC program. This is mainly
696 * of use for enabling different RPC programs to share the same transport.
697 * The Sun NFSv2/v3 ACL protocol can do this.
698 */
699struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
700				      const struct rpc_program *program,
701				      u32 vers)
702{
703	struct rpc_create_args args = {
704		.program	= program,
705		.prognumber	= program->number,
706		.version	= vers,
707		.authflavor	= old->cl_auth->au_flavor,
708	};
709	struct rpc_clnt *clnt;
710	int err;
711
712	clnt = __rpc_clone_client(&args, old);
713	if (IS_ERR(clnt))
714		goto out;
715	err = rpc_ping(clnt);
716	if (err != 0) {
717		rpc_shutdown_client(clnt);
718		clnt = ERR_PTR(err);
719	}
720out:
721	return clnt;
722}
723EXPORT_SYMBOL_GPL(rpc_bind_new_program);
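
/*
 * Usage sketch (illustrative only): attaching a second program to an
 * existing client so both share one transport.  The program table and
 * version number are placeholders standing in for something like the
 * NFSv3 ACL program mentioned above.
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(clnt, &example_acl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */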
724
725void rpc_task_release_client(struct rpc_task *task)
726{
727	struct rpc_clnt *clnt = task->tk_client;
728
729	if (clnt != NULL) {
730		/* Remove from client task list */
731		spin_lock(&clnt->cl_lock);
732		list_del(&task->tk_task);
733		spin_unlock(&clnt->cl_lock);
734		task->tk_client = NULL;
735
736		rpc_release_client(clnt);
737	}
738}
739
740static
741void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
742{
743	if (clnt != NULL) {
744		rpc_task_release_client(task);
745		task->tk_client = clnt;
746		atomic_inc(&clnt->cl_count);
747		if (clnt->cl_softrtry)
748			task->tk_flags |= RPC_TASK_SOFT;
749		if (sk_memalloc_socks()) {
750			struct rpc_xprt *xprt;
751
752			rcu_read_lock();
753			xprt = rcu_dereference(clnt->cl_xprt);
754			if (xprt->swapper)
755				task->tk_flags |= RPC_TASK_SWAPPER;
756			rcu_read_unlock();
757		}
758		/* Add to the client's list of all tasks */
759		spin_lock(&clnt->cl_lock);
760		list_add_tail(&task->tk_task, &clnt->cl_tasks);
761		spin_unlock(&clnt->cl_lock);
762	}
763}
764
765void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
766{
767	rpc_task_release_client(task);
768	rpc_task_set_client(task, clnt);
769}
770EXPORT_SYMBOL_GPL(rpc_task_reset_client);
771
772
773static void
774rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
775{
776	if (msg != NULL) {
777		task->tk_msg.rpc_proc = msg->rpc_proc;
778		task->tk_msg.rpc_argp = msg->rpc_argp;
779		task->tk_msg.rpc_resp = msg->rpc_resp;
780		if (msg->rpc_cred != NULL)
781			task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
782	}
783}
784
785/*
786 * Default callback for async RPC calls
787 */
788static void
789rpc_default_callback(struct rpc_task *task, void *data)
790{
791}
792
793static const struct rpc_call_ops rpc_default_ops = {
794	.rpc_call_done = rpc_default_callback,
795};
796
797/**
798 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
799 * @task_setup_data: pointer to task initialisation data
800 */
801struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
802{
803	struct rpc_task *task;
804
805	task = rpc_new_task(task_setup_data);
806	if (IS_ERR(task))
807		goto out;
808
809	rpc_task_set_client(task, task_setup_data->rpc_client);
810	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
811
812	if (task->tk_action == NULL)
813		rpc_call_start(task);
814
815	atomic_inc(&task->tk_count);
816	rpc_execute(task);
817out:
818	return task;
819}
820EXPORT_SYMBOL_GPL(rpc_run_task);
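
/*
 * Usage sketch (illustrative only): driving rpc_run_task() directly, much
 * as rpc_call_sync() below does.  The rpc_message setup is the caller's;
 * rpc_default_ops is the no-op callback table defined above.
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &rpc_default_ops,
 *		.flags		= RPC_TASK_SOFT,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_run_task(&setup);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = task->tk_status;
 *	rpc_put_task(task);
 */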
821
822/**
823 * rpc_call_sync - Perform a synchronous RPC call
824 * @clnt: pointer to RPC client
825 * @msg: RPC call parameters
826 * @flags: RPC call flags
827 */
828int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
829{
830	struct rpc_task	*task;
831	struct rpc_task_setup task_setup_data = {
832		.rpc_client = clnt,
833		.rpc_message = msg,
834		.callback_ops = &rpc_default_ops,
835		.flags = flags,
836	};
837	int status;
838
839	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
840	if (flags & RPC_TASK_ASYNC) {
841		rpc_release_calldata(task_setup_data.callback_ops,
842			task_setup_data.callback_data);
843		return -EINVAL;
844	}
845
846	task = rpc_run_task(&task_setup_data);
847	if (IS_ERR(task))
848		return PTR_ERR(task);
849	status = task->tk_status;
850	rpc_put_task(task);
851	return status;
852}
853EXPORT_SYMBOL_GPL(rpc_call_sync);
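
/*
 * Usage sketch (illustrative only): a synchronous call.  The procinfo
 * entry and the argument/result structures are placeholders for a real
 * procedure with its XDR encode/decode routines; a soft call returns
 * -ETIMEDOUT to the caller when the major timeout expires.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procinfo,
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status;
 *
 *	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 *	if (status < 0)
 *		return status;
 */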
854
855/**
856 * rpc_call_async - Perform an asynchronous RPC call
857 * @clnt: pointer to RPC client
858 * @msg: RPC call parameters
859 * @flags: RPC call flags
860 * @tk_ops: RPC call ops
861 * @data: user call data
862 */
863int
864rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
865	       const struct rpc_call_ops *tk_ops, void *data)
866{
867	struct rpc_task	*task;
868	struct rpc_task_setup task_setup_data = {
869		.rpc_client = clnt,
870		.rpc_message = msg,
871		.callback_ops = tk_ops,
872		.callback_data = data,
873		.flags = flags|RPC_TASK_ASYNC,
874	};
875
876	task = rpc_run_task(&task_setup_data);
877	if (IS_ERR(task))
878		return PTR_ERR(task);
879	rpc_put_task(task);
880	return 0;
881}
882EXPORT_SYMBOL_GPL(rpc_call_async);
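
/*
 * Usage sketch (illustrative only): an asynchronous call.  The caller
 * supplies an rpc_call_ops table; rpc_call_done runs when the task
 * completes and rpc_release frees the calldata.  example_done and
 * example_release are placeholder names.
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done	= example_done,
 *		.rpc_release	= example_release,
 *	};
 *
 *	err = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
 *			     &example_call_ops, calldata);
 *	if (err < 0)
 *		return err;
 */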
883
884#if defined(CONFIG_SUNRPC_BACKCHANNEL)
885/**
886 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
887 * rpc_execute against it
888 * @req: RPC request
889 * @tk_ops: RPC call ops
890 */
891struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
892				const struct rpc_call_ops *tk_ops)
893{
894	struct rpc_task *task;
895	struct xdr_buf *xbufp = &req->rq_snd_buf;
896	struct rpc_task_setup task_setup_data = {
897		.callback_ops = tk_ops,
898	};
899
900	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
901	/*
902	 * Create an rpc_task to send the data
903	 */
904	task = rpc_new_task(&task_setup_data);
905	if (IS_ERR(task)) {
906		xprt_free_bc_request(req);
907		goto out;
908	}
909	task->tk_rqstp = req;
910
911	/*
912	 * Set up the xdr_buf length.
913	 * This also indicates that the buffer is XDR encoded already.
914	 */
915	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
916			xbufp->tail[0].iov_len;
917
918	task->tk_action = call_bc_transmit;
919	atomic_inc(&task->tk_count);
920	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
921	rpc_execute(task);
922
923out:
924	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
925	return task;
926}
927#endif /* CONFIG_SUNRPC_BACKCHANNEL */
928
929void
930rpc_call_start(struct rpc_task *task)
931{
932	task->tk_action = call_start;
933}
934EXPORT_SYMBOL_GPL(rpc_call_start);
935
936/**
937 * rpc_peeraddr - extract remote peer address from clnt's xprt
938 * @clnt: RPC client structure
939 * @buf: target buffer
940 * @bufsize: length of target buffer
941 *
942 * Returns the number of bytes that are actually in the stored address.
943 */
944size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
945{
946	size_t bytes;
947	struct rpc_xprt *xprt;
948
949	rcu_read_lock();
950	xprt = rcu_dereference(clnt->cl_xprt);
951
952	bytes = xprt->addrlen;
953	if (bytes > bufsize)
954		bytes = bufsize;
955	memcpy(buf, &xprt->addr, bytes);
956	rcu_read_unlock();
957
958	return bytes;
959}
960EXPORT_SYMBOL_GPL(rpc_peeraddr);
961
962/**
963 * rpc_peeraddr2str - return remote peer address in printable format
964 * @clnt: RPC client structure
965 * @format: address format
966 *
967 * NB: the lifetime of the memory referenced by the returned pointer is
968 * the same as the rpc_xprt itself.  As long as the caller uses this
969 * pointer, it must hold the RCU read lock.
970 */
971const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
972			     enum rpc_display_format_t format)
973{
974	struct rpc_xprt *xprt;
975
976	xprt = rcu_dereference(clnt->cl_xprt);
977
978	if (xprt->address_strings[format] != NULL)
979		return xprt->address_strings[format];
980	else
981		return "unprintable";
982}
983EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
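
/*
 * Usage sketch (illustrative only): since the returned string shares the
 * rpc_xprt's lifetime, callers copy or print it under the RCU read lock,
 * roughly as follows.
 *
 *	rcu_read_lock();
 *	pr_info("connected to %s\n",
 *		rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();
 */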
984
985static const struct sockaddr_in rpc_inaddr_loopback = {
986	.sin_family		= AF_INET,
987	.sin_addr.s_addr	= htonl(INADDR_ANY),
988};
989
990static const struct sockaddr_in6 rpc_in6addr_loopback = {
991	.sin6_family		= AF_INET6,
992	.sin6_addr		= IN6ADDR_ANY_INIT,
993};
994
995/*
996 * Try a getsockname() on a connected datagram socket.  Using a
997 * connected datagram socket prevents leaving a socket in TIME_WAIT.
998 * This conserves the ephemeral port number space.
999 *
1000 * Returns zero and fills in "buf" if successful; otherwise, a
1001 * negative errno is returned.
1002 */
1003static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1004			struct sockaddr *buf, int buflen)
1005{
1006	struct socket *sock;
1007	int err;
1008
1009	err = __sock_create(net, sap->sa_family,
1010				SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1011	if (err < 0) {
1012		dprintk("RPC:       can't create UDP socket (%d)\n", err);
1013		goto out;
1014	}
1015
1016	switch (sap->sa_family) {
1017	case AF_INET:
1018		err = kernel_bind(sock,
1019				(struct sockaddr *)&rpc_inaddr_loopback,
1020				sizeof(rpc_inaddr_loopback));
1021		break;
1022	case AF_INET6:
1023		err = kernel_bind(sock,
1024				(struct sockaddr *)&rpc_in6addr_loopback,
1025				sizeof(rpc_in6addr_loopback));
1026		break;
1027	default:
1028		err = -EAFNOSUPPORT;
1029		goto out;
1030	}
1031	if (err < 0) {
1032		dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1033		goto out_release;
1034	}
1035
1036	err = kernel_connect(sock, sap, salen, 0);
1037	if (err < 0) {
1038		dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1039		goto out_release;
1040	}
1041
1042	err = kernel_getsockname(sock, buf, &buflen);
1043	if (err < 0) {
1044		dprintk("RPC:       getsockname failed (%d)\n", err);
1045		goto out_release;
1046	}
1047
1048	err = 0;
1049	if (buf->sa_family == AF_INET6) {
1050		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1051		sin6->sin6_scope_id = 0;
1052	}
1053	dprintk("RPC:       %s succeeded\n", __func__);
1054
1055out_release:
1056	sock_release(sock);
1057out:
1058	return err;
1059}
1060
1061/*
1062 * Scraping a connected socket failed, so we don't have a usable
1063 * local address.  Fallback: generate an address that will prevent
1064 * the server from calling us back.
1065 *
1066 * Returns zero and fills in "buf" if successful; otherwise, a
1067 * negative errno is returned.
1068 */
1069static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1070{
1071	switch (family) {
1072	case AF_INET:
1073		if (buflen < sizeof(rpc_inaddr_loopback))
1074			return -EINVAL;
1075		memcpy(buf, &rpc_inaddr_loopback,
1076				sizeof(rpc_inaddr_loopback));
1077		break;
1078	case AF_INET6:
1079		if (buflen < sizeof(rpc_in6addr_loopback))
1080			return -EINVAL;
1081		memcpy(buf, &rpc_in6addr_loopback,
1082				sizeof(rpc_in6addr_loopback));
		break;
1083	default:
1084		dprintk("RPC:       %s: address family not supported\n",
1085			__func__);
1086		return -EAFNOSUPPORT;
1087	}
1088	dprintk("RPC:       %s: succeeded\n", __func__);
1089	return 0;
1090}
1091
1092/**
1093 * rpc_localaddr - discover local endpoint address for an RPC client
1094 * @clnt: RPC client structure
1095 * @buf: target buffer
1096 * @buflen: size of target buffer, in bytes
1097 *
1098 * Returns zero and fills in "buf" and "buflen" if successful;
1099 * otherwise, a negative errno is returned.
1100 *
1101 * This works even if the underlying transport is not currently connected,
1102 * or if the upper layer never previously provided a source address.
1103 *
1104 * The result of this function call is transient: multiple calls in
1105 * succession may give different results, depending on how local
1106 * networking configuration changes over time.
1107 */
1108int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1109{
1110	struct sockaddr_storage address;
1111	struct sockaddr *sap = (struct sockaddr *)&address;
1112	struct rpc_xprt *xprt;
1113	struct net *net;
1114	size_t salen;
1115	int err;
1116
1117	rcu_read_lock();
1118	xprt = rcu_dereference(clnt->cl_xprt);
1119	salen = xprt->addrlen;
1120	memcpy(sap, &xprt->addr, salen);
1121	net = get_net(xprt->xprt_net);
1122	rcu_read_unlock();
1123
1124	rpc_set_port(sap, 0);
1125	err = rpc_sockname(net, sap, salen, buf, buflen);
1126	put_net(net);
1127	if (err != 0)
1128		/* Couldn't discover local address, return ANYADDR */
1129		return rpc_anyaddr(sap->sa_family, buf, buflen);
1130	return 0;
1131}
1132EXPORT_SYMBOL_GPL(rpc_localaddr);
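
/*
 * Usage sketch (illustrative only): discovering the local address this
 * client would use to reach its server.
 *
 *	struct sockaddr_storage saddr;
 *	int err;
 *
 *	err = rpc_localaddr(clnt, (struct sockaddr *)&saddr, sizeof(saddr));
 *	if (err)
 *		return err;
 */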
1133
1134void
1135rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1136{
1137	struct rpc_xprt *xprt;
1138
1139	rcu_read_lock();
1140	xprt = rcu_dereference(clnt->cl_xprt);
1141	if (xprt->ops->set_buffer_size)
1142		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1143	rcu_read_unlock();
1144}
1145EXPORT_SYMBOL_GPL(rpc_setbufsize);
1146
1147/**
1148 * rpc_protocol - Get transport protocol number for an RPC client
1149 * @clnt: RPC client to query
1150 *
1151 */
1152int rpc_protocol(struct rpc_clnt *clnt)
1153{
1154	int protocol;
1155
1156	rcu_read_lock();
1157	protocol = rcu_dereference(clnt->cl_xprt)->prot;
1158	rcu_read_unlock();
1159	return protocol;
1160}
1161EXPORT_SYMBOL_GPL(rpc_protocol);
1162
1163/**
1164 * rpc_net_ns - Get the network namespace for this RPC client
1165 * @clnt: RPC client to query
1166 *
1167 */
1168struct net *rpc_net_ns(struct rpc_clnt *clnt)
1169{
1170	struct net *ret;
1171
1172	rcu_read_lock();
1173	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1174	rcu_read_unlock();
1175	return ret;
1176}
1177EXPORT_SYMBOL_GPL(rpc_net_ns);
1178
1179/**
1180 * rpc_max_payload - Get maximum payload size for a transport, in bytes
1181 * @clnt: RPC client to query
1182 *
1183 * For stream transports, this is one RPC record fragment (see RFC
1184 * 1831), as we don't support multi-record requests yet.  For datagram
1185 * transports, this is the size of an IP packet minus the IP, UDP, and
1186 * RPC header sizes.
1187 */
1188size_t rpc_max_payload(struct rpc_clnt *clnt)
1189{
1190	size_t ret;
1191
1192	rcu_read_lock();
1193	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1194	rcu_read_unlock();
1195	return ret;
1196}
1197EXPORT_SYMBOL_GPL(rpc_max_payload);
1198
1199/**
1200 * rpc_get_timeout - Get timeout for transport in units of HZ
1201 * @clnt: RPC client to query
1202 */
1203unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1204{
1205	unsigned long ret;
1206
1207	rcu_read_lock();
1208	ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1209	rcu_read_unlock();
1210	return ret;
1211}
1212EXPORT_SYMBOL_GPL(rpc_get_timeout);
1213
1214/**
1215 * rpc_force_rebind - force transport to check that remote port is unchanged
1216 * @clnt: client to rebind
1217 *
1218 */
1219void rpc_force_rebind(struct rpc_clnt *clnt)
1220{
1221	if (clnt->cl_autobind) {
1222		rcu_read_lock();
1223		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1224		rcu_read_unlock();
1225	}
1226}
1227EXPORT_SYMBOL_GPL(rpc_force_rebind);
1228
1229/*
1230 * Restart an (async) RPC call from the call_prepare state.
1231 * Usually called from within the exit handler.
1232 */
1233int
1234rpc_restart_call_prepare(struct rpc_task *task)
1235{
1236	if (RPC_ASSASSINATED(task))
1237		return 0;
1238	task->tk_action = call_start;
1239	if (task->tk_ops->rpc_call_prepare != NULL)
1240		task->tk_action = rpc_prepare_task;
1241	return 1;
1242}
1243EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
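
/*
 * Usage sketch (illustrative only): an rpc_call_done callback may ask for
 * the call to be retried from scratch.  example_should_retry() is a
 * placeholder for whatever condition the caller checks.
 *
 *	static void example_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (example_should_retry(task, calldata)) {
 *			rpc_restart_call_prepare(task);
 *			return;
 *		}
 *		...
 *	}
 */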
1244
1245/*
1246 * Restart an (async) RPC call. Usually called from within the
1247 * exit handler.
1248 */
1249int
1250rpc_restart_call(struct rpc_task *task)
1251{
1252	if (RPC_ASSASSINATED(task))
1253		return 0;
1254	task->tk_action = call_start;
1255	return 1;
1256}
1257EXPORT_SYMBOL_GPL(rpc_restart_call);
1258
1259#ifdef RPC_DEBUG
1260static const char *rpc_proc_name(const struct rpc_task *task)
1261{
1262	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1263
1264	if (proc) {
1265		if (proc->p_name)
1266			return proc->p_name;
1267		else
1268			return "NULL";
1269	} else
1270		return "no proc";
1271}
1272#endif
1273
1274/*
1275 * 0.  Initial state
1276 *
1277 *     Other FSM states can be visited zero or more times, but
1278 *     this state is visited exactly once for each RPC.
1279 */
1280static void
1281call_start(struct rpc_task *task)
1282{
1283	struct rpc_clnt	*clnt = task->tk_client;
1284
1285	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1286			clnt->cl_program->name, clnt->cl_vers,
1287			rpc_proc_name(task),
1288			(RPC_IS_ASYNC(task) ? "async" : "sync"));
1289
1290	/* Increment call count */
1291	task->tk_msg.rpc_proc->p_count++;
1292	clnt->cl_stats->rpccnt++;
1293	task->tk_action = call_reserve;
1294}
1295
1296/*
1297 * 1.	Reserve an RPC call slot
1298 */
1299static void
1300call_reserve(struct rpc_task *task)
1301{
1302	dprint_status(task);
1303
1304	task->tk_status  = 0;
1305	task->tk_action  = call_reserveresult;
1306	xprt_reserve(task);
1307}
1308
1309static void call_retry_reserve(struct rpc_task *task);
1310
1311/*
1312 * 1b.	Grok the result of xprt_reserve()
1313 */
1314static void
1315call_reserveresult(struct rpc_task *task)
1316{
1317	int status = task->tk_status;
1318
1319	dprint_status(task);
1320
1321	/*
1322	 * After a call to xprt_reserve(), we must have either
1323	 * a request slot or else an error status.
1324	 */
1325	task->tk_status = 0;
1326	if (status >= 0) {
1327		if (task->tk_rqstp) {
1328			task->tk_action = call_refresh;
1329			return;
1330		}
1331
1332		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1333				__func__, status);
1334		rpc_exit(task, -EIO);
1335		return;
1336	}
1337
1338	/*
1339	 * Even though there was an error, we may have acquired
1340	 * a request slot somehow.  Make sure not to leak it.
1341	 */
1342	if (task->tk_rqstp) {
1343		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1344				__func__, status);
1345		xprt_release(task);
1346	}
1347
1348	switch (status) {
1349	case -ENOMEM:
1350		rpc_delay(task, HZ >> 2);
1351	case -EAGAIN:	/* woken up; retry */
1352		task->tk_action = call_retry_reserve;
1353		return;
1354	case -EIO:	/* probably a shutdown */
1355		break;
1356	default:
1357		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1358				__func__, status);
1359		break;
1360	}
1361	rpc_exit(task, status);
1362}
1363
1364/*
1365 * 1c.	Retry reserving an RPC call slot
1366 */
1367static void
1368call_retry_reserve(struct rpc_task *task)
1369{
1370	dprint_status(task);
1371
1372	task->tk_status  = 0;
1373	task->tk_action  = call_reserveresult;
1374	xprt_retry_reserve(task);
1375}
1376
1377/*
1378 * 2.	Bind and/or refresh the credentials
1379 */
1380static void
1381call_refresh(struct rpc_task *task)
1382{
1383	dprint_status(task);
1384
1385	task->tk_action = call_refreshresult;
1386	task->tk_status = 0;
1387	task->tk_client->cl_stats->rpcauthrefresh++;
1388	rpcauth_refreshcred(task);
1389}
1390
1391/*
1392 * 2a.	Process the results of a credential refresh
1393 */
1394static void
1395call_refreshresult(struct rpc_task *task)
1396{
1397	int status = task->tk_status;
1398
1399	dprint_status(task);
1400
1401	task->tk_status = 0;
1402	task->tk_action = call_refresh;
1403	switch (status) {
1404	case 0:
1405		if (rpcauth_uptodatecred(task))
1406			task->tk_action = call_allocate;
1407		return;
1408	case -ETIMEDOUT:
1409		rpc_delay(task, 3*HZ);
1410	case -EKEYEXPIRED:
1411	case -EAGAIN:
1412		status = -EACCES;
1413		if (!task->tk_cred_retry)
1414			break;
1415		task->tk_cred_retry--;
1416		dprintk("RPC: %5u %s: retry refresh creds\n",
1417				task->tk_pid, __func__);
1418		return;
1419	}
1420	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1421				task->tk_pid, __func__, status);
1422	rpc_exit(task, status);
1423}
1424
1425/*
1426 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
1427 *	(Note: buffer memory is freed in xprt_release).
1428 */
1429static void
1430call_allocate(struct rpc_task *task)
1431{
1432	unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1433	struct rpc_rqst *req = task->tk_rqstp;
1434	struct rpc_xprt *xprt = req->rq_xprt;
1435	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1436
1437	dprint_status(task);
1438
1439	task->tk_status = 0;
1440	task->tk_action = call_bind;
1441
1442	if (req->rq_buffer)
1443		return;
1444
1445	if (proc->p_proc != 0) {
1446		BUG_ON(proc->p_arglen == 0);
1447		if (proc->p_decode != NULL)
1448			BUG_ON(proc->p_replen == 0);
1449	}
1450
1451	/*
1452	 * Calculate the size (in quads) of the RPC call
1453	 * and reply headers, and convert both values
1454	 * to byte sizes.
1455	 */
1456	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1457	req->rq_callsize <<= 2;
1458	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1459	req->rq_rcvsize <<= 2;
1460
1461	req->rq_buffer = xprt->ops->buf_alloc(task,
1462					req->rq_callsize + req->rq_rcvsize);
1463	if (req->rq_buffer != NULL)
1464		return;
1465
1466	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1467
1468	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1469		task->tk_action = call_allocate;
1470		rpc_delay(task, HZ>>4);
1471		return;
1472	}
1473
1474	rpc_exit(task, -ERESTARTSYS);
1475}
1476
1477static inline int
1478rpc_task_need_encode(struct rpc_task *task)
1479{
1480	return task->tk_rqstp->rq_snd_buf.len == 0;
1481}
1482
1483static inline void
1484rpc_task_force_reencode(struct rpc_task *task)
1485{
1486	task->tk_rqstp->rq_snd_buf.len = 0;
1487	task->tk_rqstp->rq_bytes_sent = 0;
1488}
1489
1490static inline void
1491rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1492{
1493	buf->head[0].iov_base = start;
1494	buf->head[0].iov_len = len;
1495	buf->tail[0].iov_len = 0;
1496	buf->page_len = 0;
1497	buf->flags = 0;
1498	buf->len = 0;
1499	buf->buflen = len;
1500}
1501
1502/*
1503 * 3.	Encode arguments of an RPC call
1504 */
1505static void
1506rpc_xdr_encode(struct rpc_task *task)
1507{
1508	struct rpc_rqst	*req = task->tk_rqstp;
1509	kxdreproc_t	encode;
1510	__be32		*p;
1511
1512	dprint_status(task);
1513
1514	rpc_xdr_buf_init(&req->rq_snd_buf,
1515			 req->rq_buffer,
1516			 req->rq_callsize);
1517	rpc_xdr_buf_init(&req->rq_rcv_buf,
1518			 (char *)req->rq_buffer + req->rq_callsize,
1519			 req->rq_rcvsize);
1520
1521	p = rpc_encode_header(task);
1522	if (p == NULL) {
1523		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1524		rpc_exit(task, -EIO);
1525		return;
1526	}
1527
1528	encode = task->tk_msg.rpc_proc->p_encode;
1529	if (encode == NULL)
1530		return;
1531
1532	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1533			task->tk_msg.rpc_argp);
1534}
1535
1536/*
1537 * 4.	Get the server port number if not yet set
1538 */
1539static void
1540call_bind(struct rpc_task *task)
1541{
1542	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1543
1544	dprint_status(task);
1545
1546	task->tk_action = call_connect;
1547	if (!xprt_bound(xprt)) {
1548		task->tk_action = call_bind_status;
1549		task->tk_timeout = xprt->bind_timeout;
1550		xprt->ops->rpcbind(task);
1551	}
1552}
1553
1554/*
1555 * 4a.	Sort out bind result
1556 */
1557static void
1558call_bind_status(struct rpc_task *task)
1559{
1560	int status = -EIO;
1561
1562	if (task->tk_status >= 0) {
1563		dprint_status(task);
1564		task->tk_status = 0;
1565		task->tk_action = call_connect;
1566		return;
1567	}
1568
1569	trace_rpc_bind_status(task);
1570	switch (task->tk_status) {
1571	case -ENOMEM:
1572		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1573		rpc_delay(task, HZ >> 2);
1574		goto retry_timeout;
1575	case -EACCES:
1576		dprintk("RPC: %5u remote rpcbind: RPC program/version "
1577				"unavailable\n", task->tk_pid);
1578		/* fail immediately if this is an RPC ping */
1579		if (task->tk_msg.rpc_proc->p_proc == 0) {
1580			status = -EOPNOTSUPP;
1581			break;
1582		}
1583		if (task->tk_rebind_retry == 0)
1584			break;
1585		task->tk_rebind_retry--;
1586		rpc_delay(task, 3*HZ);
1587		goto retry_timeout;
1588	case -ETIMEDOUT:
1589		dprintk("RPC: %5u rpcbind request timed out\n",
1590				task->tk_pid);
1591		goto retry_timeout;
1592	case -EPFNOSUPPORT:
1593		/* server doesn't support any rpcbind version we know of */
1594		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1595				task->tk_pid);
1596		break;
1597	case -EPROTONOSUPPORT:
1598		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1599				task->tk_pid);
1600		task->tk_status = 0;
1601		task->tk_action = call_bind;
1602		return;
1603	case -ECONNREFUSED:		/* connection problems */
1604	case -ECONNRESET:
1605	case -ENOTCONN:
1606	case -EHOSTDOWN:
1607	case -EHOSTUNREACH:
1608	case -ENETUNREACH:
1609	case -EPIPE:
1610		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1611				task->tk_pid, task->tk_status);
1612		if (!RPC_IS_SOFTCONN(task)) {
1613			rpc_delay(task, 5*HZ);
1614			goto retry_timeout;
1615		}
1616		status = task->tk_status;
1617		break;
1618	default:
1619		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1620				task->tk_pid, -task->tk_status);
1621	}
1622
1623	rpc_exit(task, status);
1624	return;
1625
1626retry_timeout:
1627	task->tk_action = call_timeout;
1628}
1629
1630/*
1631 * 4b.	Connect to the RPC server
1632 */
1633static void
1634call_connect(struct rpc_task *task)
1635{
1636	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1637
1638	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1639			task->tk_pid, xprt,
1640			(xprt_connected(xprt) ? "is" : "is not"));
1641
1642	task->tk_action = call_transmit;
1643	if (!xprt_connected(xprt)) {
1644		task->tk_action = call_connect_status;
1645		if (task->tk_status < 0)
1646			return;
1647		xprt_connect(task);
1648	}
1649}
1650
1651/*
1652 * 4c.	Sort out connect result
1653 */
1654static void
1655call_connect_status(struct rpc_task *task)
1656{
1657	struct rpc_clnt *clnt = task->tk_client;
1658	int status = task->tk_status;
1659
1660	dprint_status(task);
1661
1662	trace_rpc_connect_status(task, status);
1663	switch (status) {
1664		/* if soft mounted, test if we've timed out */
1665	case -ETIMEDOUT:
1666		task->tk_action = call_timeout;
1667		return;
1668	case -ECONNREFUSED:
1669	case -ECONNRESET:
1670	case -ENETUNREACH:
1671		if (RPC_IS_SOFTCONN(task))
1672			break;
1673		/* retry with existing socket, after a delay */
1674	case 0:
1675	case -EAGAIN:
1676		task->tk_status = 0;
1677		clnt->cl_stats->netreconn++;
1678		task->tk_action = call_transmit;
1679		return;
1680	}
1681	rpc_exit(task, status);
1682}
1683
1684/*
1685 * 5.	Transmit the RPC request, and wait for reply
1686 */
1687static void
1688call_transmit(struct rpc_task *task)
1689{
1690	dprint_status(task);
1691
1692	task->tk_action = call_status;
1693	if (task->tk_status < 0)
1694		return;
1695	task->tk_status = xprt_prepare_transmit(task);
1696	if (task->tk_status != 0)
1697		return;
1698	task->tk_action = call_transmit_status;
1699	/* Encode here so that rpcsec_gss can use correct sequence number. */
1700	if (rpc_task_need_encode(task)) {
1701		rpc_xdr_encode(task);
1702		/* Did the encode result in an error condition? */
1703		if (task->tk_status != 0) {
1704			/* Was the error nonfatal? */
1705			if (task->tk_status == -EAGAIN)
1706				rpc_delay(task, HZ >> 4);
1707			else
1708				rpc_exit(task, task->tk_status);
1709			return;
1710		}
1711	}
1712	xprt_transmit(task);
1713	if (task->tk_status < 0)
1714		return;
1715	/*
1716	 * On success, ensure that we call xprt_end_transmit() before sleeping
1717	 * in order to allow other RPC requests access to the socket.
1718	 */
1719	call_transmit_status(task);
1720	if (rpc_reply_expected(task))
1721		return;
1722	task->tk_action = rpc_exit_task;
1723	rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1724}
1725
1726/*
1727 * 5a.	Handle cleanup after a transmission
1728 */
1729static void
1730call_transmit_status(struct rpc_task *task)
1731{
1732	task->tk_action = call_status;
1733
1734	/*
1735	 * Common case: success.  Force the compiler to put this
1736	 * test first.
1737	 */
1738	if (task->tk_status == 0) {
1739		xprt_end_transmit(task);
1740		rpc_task_force_reencode(task);
1741		return;
1742	}
1743
1744	switch (task->tk_status) {
1745	case -EAGAIN:
1746		break;
1747	default:
1748		dprint_status(task);
1749		xprt_end_transmit(task);
1750		rpc_task_force_reencode(task);
1751		break;
1752		/*
1753		 * Special cases: if we've been waiting on the
1754		 * socket's write_space() callback, or if the
1755		 * socket just returned a connection error,
1756		 * then hold onto the transport lock.
1757		 */
1758	case -ECONNREFUSED:
1759	case -EHOSTDOWN:
1760	case -EHOSTUNREACH:
1761	case -ENETUNREACH:
1762		if (RPC_IS_SOFTCONN(task)) {
1763			xprt_end_transmit(task);
1764			rpc_exit(task, task->tk_status);
1765			break;
1766		}
1767	case -ECONNRESET:
1768	case -ENOTCONN:
1769	case -EPIPE:
1770		rpc_task_force_reencode(task);
1771	}
1772}
1773
1774#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1775/*
1776 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
1777 * addition, disconnect on connectivity errors.
1778 */
1779static void
1780call_bc_transmit(struct rpc_task *task)
1781{
1782	struct rpc_rqst *req = task->tk_rqstp;
1783
1784	task->tk_status = xprt_prepare_transmit(task);
1785	if (task->tk_status == -EAGAIN) {
1786		/*
1787		 * Could not reserve the transport. Try again after the
1788		 * transport is released.
1789		 */
1790		task->tk_status = 0;
1791		task->tk_action = call_bc_transmit;
1792		return;
1793	}
1794
1795	task->tk_action = rpc_exit_task;
1796	if (task->tk_status < 0) {
1797		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1798			"error: %d\n", task->tk_status);
1799		return;
1800	}
1801
1802	xprt_transmit(task);
1803	xprt_end_transmit(task);
1804	dprint_status(task);
1805	switch (task->tk_status) {
1806	case 0:
1807		/* Success */
1808		break;
1809	case -EHOSTDOWN:
1810	case -EHOSTUNREACH:
1811	case -ENETUNREACH:
1812	case -ETIMEDOUT:
1813		/*
1814		 * Problem reaching the server.  Disconnect and let the
1815		 * forechannel reestablish the connection.  The server will
1816		 * have to retransmit the backchannel request and we'll
1817		 * reprocess it.  Since these ops are idempotent, there's no
1818		 * need to cache our reply at this time.
1819		 */
1820		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1821			"error: %d\n", task->tk_status);
1822		xprt_conditional_disconnect(req->rq_xprt,
1823			req->rq_connect_cookie);
1824		break;
1825	default:
1826		/*
1827		 * We were unable to reply and will have to drop the
1828		 * request.  The server should reconnect and retransmit.
1829		 */
1830		WARN_ON_ONCE(task->tk_status == -EAGAIN);
1831		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1832			"error: %d\n", task->tk_status);
1833		break;
1834	}
1835	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1836}
1837#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1838
1839/*
1840 * 6.	Sort out the RPC call status
1841 */
1842static void
1843call_status(struct rpc_task *task)
1844{
1845	struct rpc_clnt	*clnt = task->tk_client;
1846	struct rpc_rqst	*req = task->tk_rqstp;
1847	int		status;
1848
1849	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1850		task->tk_status = req->rq_reply_bytes_recvd;
1851
1852	dprint_status(task);
1853
1854	status = task->tk_status;
1855	if (status >= 0) {
1856		task->tk_action = call_decode;
1857		return;
1858	}
1859
1860	trace_rpc_call_status(task);
1861	task->tk_status = 0;
1862	switch(status) {
1863	case -EHOSTDOWN:
1864	case -EHOSTUNREACH:
1865	case -ENETUNREACH:
1866		/*
1867		 * Delay any retries for 3 seconds, then handle as if it
1868		 * were a timeout.
1869		 */
1870		rpc_delay(task, 3*HZ);
1871	case -ETIMEDOUT:
1872		task->tk_action = call_timeout;
1873		if (task->tk_client->cl_discrtry)
1874			xprt_conditional_disconnect(req->rq_xprt,
1875					req->rq_connect_cookie);
1876		break;
1877	case -ECONNRESET:
1878	case -ECONNREFUSED:
1879		rpc_force_rebind(clnt);
1880		rpc_delay(task, 3*HZ);
1881	case -EPIPE:
1882	case -ENOTCONN:
1883		task->tk_action = call_bind;
1884		break;
1885	case -EAGAIN:
1886		task->tk_action = call_transmit;
1887		break;
1888	case -EIO:
1889		/* shutdown or soft timeout */
1890		rpc_exit(task, status);
1891		break;
1892	default:
1893		if (clnt->cl_chatty)
1894			printk("%s: RPC call returned error %d\n",
1895			       clnt->cl_program->name, -status);
1896		rpc_exit(task, status);
1897	}
1898}
1899
1900/*
1901 * 6a.	Handle RPC timeout
1902 * 	We do not release the request slot, so we keep using the
1903 *	same XID for all retransmits.
1904 */
1905static void
1906call_timeout(struct rpc_task *task)
1907{
1908	struct rpc_clnt	*clnt = task->tk_client;
1909
1910	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1911		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1912		goto retry;
1913	}
1914
1915	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1916	task->tk_timeouts++;
1917
1918	if (RPC_IS_SOFTCONN(task)) {
1919		rpc_exit(task, -ETIMEDOUT);
1920		return;
1921	}
1922	if (RPC_IS_SOFT(task)) {
1923		if (clnt->cl_chatty) {
1924			rcu_read_lock();
1925			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1926				clnt->cl_program->name,
1927				rcu_dereference(clnt->cl_xprt)->servername);
1928			rcu_read_unlock();
1929		}
1930		if (task->tk_flags & RPC_TASK_TIMEOUT)
1931			rpc_exit(task, -ETIMEDOUT);
1932		else
1933			rpc_exit(task, -EIO);
1934		return;
1935	}
1936
1937	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1938		task->tk_flags |= RPC_CALL_MAJORSEEN;
1939		if (clnt->cl_chatty) {
1940			rcu_read_lock();
1941			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1942			clnt->cl_program->name,
1943			rcu_dereference(clnt->cl_xprt)->servername);
1944			rcu_read_unlock();
1945		}
1946	}
1947	rpc_force_rebind(clnt);
1948	/*
1949	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
1950	 * event? RFC2203 requires the server to drop all such requests.
1951	 */
1952	rpcauth_invalcred(task);
1953
1954retry:
1955	clnt->cl_stats->rpcretrans++;
1956	task->tk_action = call_bind;
1957	task->tk_status = 0;
1958}
1959
1960/*
1961 * 7.	Decode the RPC reply
1962 */
1963static void
1964call_decode(struct rpc_task *task)
1965{
1966	struct rpc_clnt	*clnt = task->tk_client;
1967	struct rpc_rqst	*req = task->tk_rqstp;
1968	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
1969	__be32		*p;
1970
1971	dprint_status(task);
1972
1973	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1974		if (clnt->cl_chatty) {
1975			rcu_read_lock();
1976			printk(KERN_NOTICE "%s: server %s OK\n",
1977				clnt->cl_program->name,
1978				rcu_dereference(clnt->cl_xprt)->servername);
1979			rcu_read_unlock();
1980		}
1981		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1982	}
1983
1984	/*
1985	 * Ensure that we see all writes made by xprt_complete_rqst()
1986	 * before it changed req->rq_reply_bytes_recvd.
1987	 */
1988	smp_rmb();
1989	req->rq_rcv_buf.len = req->rq_private_buf.len;
1990
1991	/* Check that the softirq receive buffer is valid */
1992	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1993				sizeof(req->rq_rcv_buf)) != 0);
1994
1995	if (req->rq_rcv_buf.len < 12) {
1996		if (!RPC_IS_SOFT(task)) {
1997			task->tk_action = call_bind;
1998			clnt->cl_stats->rpcretrans++;
1999			goto out_retry;
2000		}
2001		dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2002				clnt->cl_program->name, task->tk_status);
2003		task->tk_action = call_timeout;
2004		goto out_retry;
2005	}
2006
2007	p = rpc_verify_header(task);
2008	if (IS_ERR(p)) {
2009		if (p == ERR_PTR(-EAGAIN))
2010			goto out_retry;
2011		return;
2012	}
2013
2014	task->tk_action = rpc_exit_task;
2015
2016	if (decode) {
2017		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2018						      task->tk_msg.rpc_resp);
2019	}
2020	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2021			task->tk_status);
2022	return;
2023out_retry:
2024	task->tk_status = 0;
2025	/* Note: rpc_verify_header() may have freed the RPC slot */
2026	if (task->tk_rqstp == req) {
2027		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2028		if (task->tk_client->cl_discrtry)
2029			xprt_conditional_disconnect(req->rq_xprt,
2030					req->rq_connect_cookie);
2031	}
2032}
2033
2034static __be32 *
2035rpc_encode_header(struct rpc_task *task)
2036{
2037	struct rpc_clnt *clnt = task->tk_client;
2038	struct rpc_rqst	*req = task->tk_rqstp;
2039	__be32		*p = req->rq_svec[0].iov_base;
2040
2041	/* FIXME: check buffer size? */
2042
2043	p = xprt_skip_transport_header(req->rq_xprt, p);
2044	*p++ = req->rq_xid;		/* XID */
2045	*p++ = htonl(RPC_CALL);		/* CALL */
2046	*p++ = htonl(RPC_VERSION);	/* RPC version */
2047	*p++ = htonl(clnt->cl_prog);	/* program number */
2048	*p++ = htonl(clnt->cl_vers);	/* program version */
2049	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
2050	p = rpcauth_marshcred(task, p);
2051	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2052	return p;
2053}
2054
2055static __be32 *
2056rpc_verify_header(struct rpc_task *task)
2057{
2058	struct rpc_clnt *clnt = task->tk_client;
2059	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2060	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2061	__be32	*p = iov->iov_base;
2062	u32 n;
2063	int error = -EACCES;
2064
2065	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2066		/* RFC-1014 says that the representation of XDR data must be a
2067		 * multiple of four bytes
2068		 * - if it isn't, pointer subtraction in the NFS client may give
2069		 *   undefined results
2070		 */
2071		dprintk("RPC: %5u %s: XDR representation not a multiple of"
2072		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2073		       task->tk_rqstp->rq_rcv_buf.len);
2074		goto out_eio;
2075	}
2076	if ((len -= 3) < 0)
2077		goto out_overflow;
2078
2079	p += 1; /* skip XID */
2080	if ((n = ntohl(*p++)) != RPC_REPLY) {
2081		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2082			task->tk_pid, __func__, n);
2083		goto out_garbage;
2084	}
2085
2086	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2087		if (--len < 0)
2088			goto out_overflow;
2089		switch ((n = ntohl(*p++))) {
2090		case RPC_AUTH_ERROR:
2091			break;
2092		case RPC_MISMATCH:
2093			dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2094				task->tk_pid, __func__);
2095			error = -EPROTONOSUPPORT;
2096			goto out_err;
2097		default:
2098			dprintk("RPC: %5u %s: RPC call rejected, "
2099				"unknown error: %x\n",
2100				task->tk_pid, __func__, n);
2101			goto out_eio;
2102		}
2103		if (--len < 0)
2104			goto out_overflow;
2105		switch ((n = ntohl(*p++))) {
2106		case RPC_AUTH_REJECTEDCRED:
2107		case RPC_AUTH_REJECTEDVERF:
2108		case RPCSEC_GSS_CREDPROBLEM:
2109		case RPCSEC_GSS_CTXPROBLEM:
2110			if (!task->tk_cred_retry)
2111				break;
2112			task->tk_cred_retry--;
2113			dprintk("RPC: %5u %s: retry stale creds\n",
2114					task->tk_pid, __func__);
2115			rpcauth_invalcred(task);
2116			/* Ensure we obtain a new XID! */
2117			xprt_release(task);
2118			task->tk_action = call_reserve;
2119			goto out_retry;
2120		case RPC_AUTH_BADCRED:
2121		case RPC_AUTH_BADVERF:
2122			/* possibly garbled cred/verf? */
2123			if (!task->tk_garb_retry)
2124				break;
2125			task->tk_garb_retry--;
2126			dprintk("RPC: %5u %s: retry garbled creds\n",
2127					task->tk_pid, __func__);
2128			task->tk_action = call_bind;
2129			goto out_retry;
2130		case RPC_AUTH_TOOWEAK:
2131			rcu_read_lock();
2132			printk(KERN_NOTICE "RPC: server %s requires stronger "
2133			       "authentication.\n",
2134			       rcu_dereference(clnt->cl_xprt)->servername);
2135			rcu_read_unlock();
2136			break;
2137		default:
2138			dprintk("RPC: %5u %s: unknown auth error: %x\n",
2139					task->tk_pid, __func__, n);
2140			error = -EIO;
2141		}
2142		dprintk("RPC: %5u %s: call rejected %d\n",
2143				task->tk_pid, __func__, n);
2144		goto out_err;
2145	}
2146	if (!(p = rpcauth_checkverf(task, p))) {
2147		dprintk("RPC: %5u %s: auth check failed\n",
2148				task->tk_pid, __func__);
2149		goto out_garbage;		/* bad verifier, retry */
2150	}
2151	len = p - (__be32 *)iov->iov_base - 1;
2152	if (len < 0)
2153		goto out_overflow;
2154	switch ((n = ntohl(*p++))) {
2155	case RPC_SUCCESS:
2156		return p;
2157	case RPC_PROG_UNAVAIL:
2158		dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2159				"by server %s\n", task->tk_pid, __func__,
2160				(unsigned int)clnt->cl_prog,
2161				rcu_dereference(clnt->cl_xprt)->servername);
2162		error = -EPFNOSUPPORT;
2163		goto out_err;
2164	case RPC_PROG_MISMATCH:
2165		dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2166				"by server %s\n", task->tk_pid, __func__,
2167				(unsigned int)clnt->cl_prog,
2168				(unsigned int)clnt->cl_vers,
2169				rcu_dereference(clnt->cl_xprt)->servername);
2170		error = -EPROTONOSUPPORT;
2171		goto out_err;
2172	case RPC_PROC_UNAVAIL:
2173		dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2174				"version %u on server %s\n",
2175				task->tk_pid, __func__,
2176				rpc_proc_name(task),
2177				clnt->cl_prog, clnt->cl_vers,
2178				rcu_dereference(clnt->cl_xprt)->servername);
2179		error = -EOPNOTSUPP;
2180		goto out_err;
2181	case RPC_GARBAGE_ARGS:
2182		dprintk("RPC: %5u %s: server saw garbage\n",
2183				task->tk_pid, __func__);
2184		break;			/* retry */
2185	default:
2186		dprintk("RPC: %5u %s: server accept status: %x\n",
2187				task->tk_pid, __func__, n);
2188		/* Also retry */
2189	}
2190
2191out_garbage:
2192	clnt->cl_stats->rpcgarbage++;
2193	if (task->tk_garb_retry) {
2194		task->tk_garb_retry--;
2195		dprintk("RPC: %5u %s: retrying\n",
2196				task->tk_pid, __func__);
2197		task->tk_action = call_bind;
2198out_retry:
2199		return ERR_PTR(-EAGAIN);
2200	}
2201out_eio:
2202	error = -EIO;
2203out_err:
2204	rpc_exit(task, error);
2205	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2206			__func__, error);
2207	return ERR_PTR(error);
2208out_overflow:
2209	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2210			__func__);
2211	goto out_garbage;
2212}
2213
2214static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2215{
2216}
2217
2218static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2219{
2220	return 0;
2221}
2222
2223static struct rpc_procinfo rpcproc_null = {
2224	.p_encode = rpcproc_encode_null,
2225	.p_decode = rpcproc_decode_null,
2226};
2227
2228static int rpc_ping(struct rpc_clnt *clnt)
2229{
2230	struct rpc_message msg = {
2231		.rpc_proc = &rpcproc_null,
2232	};
2233	int err;
2234	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2235	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2236	put_rpccred(msg.rpc_cred);
2237	return err;
2238}
2239
2240struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2241{
2242	struct rpc_message msg = {
2243		.rpc_proc = &rpcproc_null,
2244		.rpc_cred = cred,
2245	};
2246	struct rpc_task_setup task_setup_data = {
2247		.rpc_client = clnt,
2248		.rpc_message = &msg,
2249		.callback_ops = &rpc_default_ops,
2250		.flags = flags,
2251	};
2252	return rpc_run_task(&task_setup_data);
2253}
2254EXPORT_SYMBOL_GPL(rpc_call_null);
2255
2256#ifdef RPC_DEBUG
2257static void rpc_show_header(void)
2258{
2259	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2260		"-timeout ---ops--\n");
2261}
2262
2263static void rpc_show_task(const struct rpc_clnt *clnt,
2264			  const struct rpc_task *task)
2265{
2266	const char *rpc_waitq = "none";
2267
2268	if (RPC_IS_QUEUED(task))
2269		rpc_waitq = rpc_qname(task->tk_waitqueue);
2270
2271	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2272		task->tk_pid, task->tk_flags, task->tk_status,
2273		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2274		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
2275		task->tk_action, rpc_waitq);
2276}
2277
2278void rpc_show_tasks(struct net *net)
2279{
2280	struct rpc_clnt *clnt;
2281	struct rpc_task *task;
2282	int header = 0;
2283	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2284
2285	spin_lock(&sn->rpc_client_lock);
2286	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2287		spin_lock(&clnt->cl_lock);
2288		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2289			if (!header) {
2290				rpc_show_header();
2291				header++;
2292			}
2293			rpc_show_task(clnt, task);
2294		}
2295		spin_unlock(&clnt->cl_lock);
2296	}
2297	spin_unlock(&sn->rpc_client_lock);
2298}
2299#endif
2300