clnt.c revision a246b0105bbd9a70a698f69baae2042996f2a0e9
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);

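/*
 * Overview of the call FSM, as implemented by the handlers below:
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind -> call_bind_status -> call_connect
 *	  -> call_connect_status -> call_transmit (encoding via
 *	  call_encode/call_header) -> call_status -> call_decode
 *	  (verification via call_verify)
 *
 * Error paths re-enter earlier states: call_timeout and short or
 * garbled replies restart at call_bind, while stale credentials
 * divert through call_refresh -> call_refreshresult -> call_reserve.
 */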

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers,
		  rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
out_no_xprt:
	return ERR_PTR(err);
}

/**
 * rpc_create_client - create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
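
/*
 * A minimal sketch of a typical caller. The transport setup via
 * xprt_create_proto() and the program/address variables are the
 * caller's responsibility and are only illustrative here:
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &srvaddr, NULL);
 *	if (IS_ERR(xprt))
 *		return ERR_PTR(PTR_ERR(xprt));
 *	clnt = rpc_create_client(xprt, hostname, &my_program, my_version,
 *				 RPC_AUTH_UNIX);
 *
 * Note that rpc_new_client() consumes @xprt on failure, so the caller
 * must not destroy it again when an ERR_PTR is returned.
 */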

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap		= &new->cl_pmap_default;
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:      rpc_release_client(%p, %d)\n",
				clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
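
/*
 * Reference counting summary, as implemented above: cl_count counts
 * references to the rpc_clnt structure itself and is dropped by
 * rpc_destroy_client(); cl_users counts tasks currently using the
 * client and is dropped by rpc_release_client(). When the last user
 * goes away, a client marked cl_oneshot or cl_dead is destroyed
 * automatically; otherwise it lives on until rpc_destroy_client().
 */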

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 *	Export the signal mask handling for synchronous code that
 *	sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = 0;
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}
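
/*
 * Code that sleeps on an RPC reply outside of rpc_call_sync() is
 * expected to bracket the wait with these helpers, e.g. (sketch):
 *
 *	sigset_t oldset;
 *
 *	rpc_clnt_sigmask(clnt, &oldset);
 *	... sleep, waiting for the RPC task to complete ...
 *	rpc_clnt_sigunmask(clnt, &oldset);
 */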

/*
 * Perform a synchronous RPC call and wait for completion
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, NULL, flags);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0) {
		status = rpc_execute(task);
	} else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_restore_sigmask(&oldset);
out:
	return status;
}
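
/*
 * A hedged sketch of a synchronous call; the procedure entry and the
 * argument/result structures are placeholders for whatever the RPC
 * program in question defines:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &my_procedures[MYPROC_GETATTR],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 *
 * A negative return value is either a local error or the decoded
 * RPC-level error.
 */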

/*
 * Perform an asynchronous RPC call, invoking @callback on completion
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}
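
/*
 * The asynchronous variant runs under rpciod and invokes @callback
 * when the task completes; my_done and my_data below are illustrative
 * only:
 *
 *	static void my_done(struct rpc_task *task)
 *	{
 *		struct my_data *data = (struct my_data *)task->tk_calldata;
 *
 *		... inspect task->tk_status, free data ...
 *	}
 *
 *	status = rpc_call_async(clnt, &msg, 0, my_done, data);
 */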

void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = NULL;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	xprt->sndsize = 0;
	if (sndsize)
		xprt->sndsize = sndsize + RPC_SLACK_SPACE;
	xprt->rcvsize = 0;
	if (rcvsize)
		xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
	xprt->ops->set_buffer_size(xprt);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_release_task).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
						 task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = RPC_CONNECT_TIMEOUT;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (!task->tk_rqstp->rq_bytes_sent)
		call_encode(task);
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch (status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind)
			clnt->cl_port = 0;
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/* The shortest valid reply is 12 bytes: xid, direction, reply status */
	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: RPC reply too short (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	if (!(p = call_verify(task))) {
		if (task->tk_action == NULL)
			return;
		goto out_retry;
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_xprt *xprt = clnt->cl_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;		/* fill in later */
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
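
/*
 * The words marshalled above follow the RFC 1831 call header layout:
 *
 *	[record marker]	stream transports only, patched in at send time
 *	xid		copied from the request slot
 *	mtype		CALL (0)
 *	rpcvers		2
 *	prog, vers, proc
 *	cred, verf	appended by rpcauth_marshcred()
 */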

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_retry;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_retry;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %u unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc->p_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_retry:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return NULL;
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return NULL;
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_retry;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;

	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}