clnt.c revision 35f5a422ce1af836007f811b613c440d0e348e06
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);


static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_err;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
	return ERR_PTR(err);
}

/**
 * rpc_create_client - create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}

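/*
 * Example (illustrative sketch): a typical caller builds a transport
 * first and then creates a pinged client, assuming the xprt_create_proto()
 * helper from net/sunrpc/xprt.c.  "server_addr", "my_program" and
 * "my_version" are hypothetical.
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, NULL);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *	clnt = rpc_create_client(xprt, "myserver", &my_program,
 *				 my_version, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);	(the xprt is destroyed on failure)
 */
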
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

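/*
 * Note on the two counters used above (inferred from this file and
 * sched.c): cl_count pins the rpc_clnt structure itself, e.g.
 * rpc_clone_client() takes one reference on its parent, while cl_users
 * counts tasks currently bound to the client.  rpc_release_client()
 * drops a user reference and destroys the client only if cl_oneshot or
 * cl_dead is set; rpc_shutdown_client() is the owner-side teardown,
 * which waits for cl_users to reach zero before dropping cl_count.
 */
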
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}

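/*
 * Example (illustrative sketch): issuing a call through the two entry
 * points above and below.  "my_clnt", "my_proc", "args", "res" and
 * "my_ops" are hypothetical.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &my_proc,
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *
 *	status = rpc_call_sync(my_clnt, &msg, 0);
 *
 * or, asynchronously, with completion reported via my_ops.rpc_call_done
 * (see rpc_default_ops above for the callback signature):
 *
 *	status = rpc_call_async(my_clnt, &msg, 0, &my_ops, NULL);
 */
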
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}


void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

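/*
 * Overview (illustrative summary of the states implemented below):
 * a normal call walks roughly
 *
 *	call_start -> call_reserve -> call_allocate -> call_bind ->
 *	call_connect -> call_transmit -> call_status -> call_decode
 *
 * with call_refresh, call_timeout and the *_status / *result states
 * handling errors and retries along the way.  Argument encoding is
 * performed from call_transmit via call_encode.
 */
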
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	/* Allocate twice bufsiz: call_encode() splits the buffer into
	 * a send half and a receive half. */
	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}

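/*
 * Example (illustrative sketch): a minimal p_encode routine with the
 * kxdrproc_t shape used above; compare rpcproc_encode_null() at the end
 * of this file.  "my_enc" is hypothetical and XDR-encodes a single
 * 32-bit argument after the header that call_header() wrote:
 *
 *	static int my_enc(struct rpc_rqst *req, u32 *p, u32 *argp)
 *	{
 *		*p++ = htonl(*argp);
 *		req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
 *		return 0;
 *	}
 */
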
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}

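/*
 * Note on the rebind path above: rpc_force_rebind() only clears cl_port
 * when cl_autobind is set, so a connection failure on an autobound
 * client sends the task back through call_bind, which sees cl_port == 0
 * and queries the remote portmapper again via rpc_getport().
 */
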
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a.	Handle transmission errors.
 */
static void
call_transmit_status(struct rpc_task *task)
{
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
	call_status(task);
}

/*
 * 6b.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

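/*
 * Note on call_timeout() above: with RPC_TASK_SOFT set (as rpc_ping()
 * uses), a major timeout fails the call with -EIO.  A hard task instead
 * logs "still trying", forces a rebind, and loops back to call_bind;
 * since the request slot is kept, retransmits reuse the same XID and a
 * late server reply can still match the request.
 */
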
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/* A sane reply is at least three 32-bit words: the xid, the
	 * message direction, and the reply status (RFC 1831). */
	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

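/*
 * Illustrative sketch of what call_header() marshals, per RFC 1831
 * (32-bit words, network byte order), after any transport header:
 *
 *	xid | msg type (CALL) | rpc version (2) | prog | vers | proc |
 *	credential (flavor, length, body) | verifier (flavor, length, body)
 *
 * The credential and verifier are appended by rpcauth_marshcred().
 */
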
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}