clnt.c revision f134585a7343d71f9be7f0cf97e2145f21dd10c6
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  - RPC header generation and argument serialization.
 *  - Credential refresh.
 *  - TCP connect handling.
 *  - Retry of operation when it is suspected the operation failed because
 *    of uid squashing on the server, or when the credentials were stale
 *    and need to be refreshed, or when a packet was damaged in transit.
 *    This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);


static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_err;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != 0)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap     = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
	return ERR_PTR(err);
}

/**
 * Create an RPC client
 * @xprt - pointer to xprt struct
 * @servname - name of server
 * @info - rpc_program
 * @version - rpc_program version
 * @authflavor - rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}

/*
 * This function clones the RPC client structure.
 * It allows us to share the same transport while varying parameters
 * such as the authentication flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old - old rpc_client
 * @program - rpc program to set
 * @vers - rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = 0;
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, NULL, flags);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0) {
		status = rpc_execute(task);
	} else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_restore_sigmask(&oldset);
out:
	return status;
}

/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}


void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = NULL;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
						 task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (!task->tk_rqstp->rq_bytes_sent)
		call_encode(task);
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind)
			clnt->cl_port = 0;
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	if (!(p = call_verify(task))) {
		if (task->tk_action == NULL)
			return;
		goto out_retry;
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_retry;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_retry;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_retry:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return NULL;
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return NULL;
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_retry;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
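
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * this revision of clnt.c.  It shows how a hypothetical in-kernel caller
 * might drive the interface defined above: create a client over an existing
 * transport with rpc_create_client() (which also pings the server), issue a
 * synchronous call through rpc_call_sync() (running the call_* state machine
 * above), and tear the client down with rpc_shutdown_client().  The symbols
 * my_program and my_nullproc are assumed placeholders for a caller-supplied
 * rpc_program and rpc_procinfo; they are not defined in this file.
 */
#if 0
static int example_ping_server(struct rpc_xprt *xprt, char *servname)
{
	struct rpc_clnt *clnt;
	struct rpc_message msg = {
		.rpc_proc = &my_nullproc,	/* hypothetical rpc_procinfo */
	};
	int err;

	/* Bind program/version 1 to the transport and ping the server. */
	clnt = rpc_create_client(xprt, servname, &my_program, 1, RPC_AUTH_UNIX);
	if (IS_ERR(clnt))
		return PTR_ERR(clnt);

	/* Run the call synchronously; soft so it fails instead of retrying forever. */
	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);

	rpc_shutdown_client(clnt);
	return err;
}
#endif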