clnt.c revision 2bd615797ef32ec06ef0ee44198a7aecc21ffd8c
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
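/*
 * For orientation, the happy path through the state machine implemented
 * by the handlers above is roughly (reconstructed from the numbered step
 * comments below; error and retry transitions omitted):
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind -> call_connect -> call_transmit -> call_status
 *	  -> call_decode -> rpc_exit_task
 *
 * Credential refresh (call_refresh/call_refreshresult) re-enters the
 * chain at call_reserve; timeouts and rebinds re-enter at call_bind.
 */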
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_err;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != 0)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap = &clnt->cl_pmap_default;
	clnt->cl_port = xprt->addr.sin_port;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_prot = xprt->prot;
	clnt->cl_stats = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
	return ERR_PTR(err);
}

/**
 * rpc_create_client - create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
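/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file; the transport setup and the nfs_program/NFS_VERSION names are
 * just one plausible caller, e.g. an NFS mount):
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeparms);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *	clnt = rpc_create_client(xprt, "server.example.com",
 *				 &nfs_program, NFS_VERSION, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *
 * Note that rpc_new_client() destroys the transport on failure, so the
 * caller must not destroy @xprt again when an ERR_PTR comes back.
 */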
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
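/*
 * A note on the two reference counts (inferred from the code above; not
 * part of the original file): cl_count counts references to the client
 * structure itself (its creator, plus each clone pinning its parent)
 * and is dropped by rpc_destroy_client(); cl_users counts active tasks
 * and is dropped by rpc_release_client(). Roughly:
 *
 *	rpc_new_client()	cl_count = 1, cl_users = 0
 *	per in-flight task	cl_users++, then rpc_release_client()
 *	rpc_shutdown_client()	waits until cl_users reaches 0, then
 *				drops cl_count via rpc_destroy_client()
 */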
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}
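/*
 * Sketch of a typical synchronous call site (illustrative, not part of
 * the original file; "args" and "res" stand for the procedure's XDR
 * argument and result structures, and the procinfo name is made up):
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &my_procedures[MYPROC_LOOKUP],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 *
 * With msg.rpc_cred left NULL, rpc_call_setup() binds a credential via
 * rpcauth_bindcred(); callers may instead supply one in msg.rpc_cred.
 */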
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}
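/*
 * Async sketch (illustrative, not part of the original file; my_done,
 * my_ops and my_state are placeholders): the caller supplies rpc_call_ops
 * whose rpc_call_done is invoked on completion, with "data" handed back
 * as the calldata argument.
 *
 *	static void my_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status < 0)
 *			... handle the error, e.g. schedule a retry ...
 *	}
 *	static const struct rpc_call_ops my_ops = {
 *		.rpc_call_done = my_done,
 *	};
 *
 *	rpc_call_async(clnt, &msg, 0, &my_ops, my_state);
 */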
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.  Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
			task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.  Allocate the buffer. For details, see sched.c:rpc_malloc.
 *     (Note: buffer memory is freed in rpc_release_task).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}
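/*
 * Buffer layout note (inferred from call_allocate above and call_encode
 * below; not part of the original file): the single tk_buffer is split
 * in half, the lower half backing the send buffer and the upper half
 * the receive buffer, with bufsiz = tk_bufsize >> 1:
 *
 *	tk_buffer: [ rq_snd_buf head | rq_rcv_buf head ]
 *	           |<--- bufsiz ---->|<--- bufsiz ---->|
 */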
/*
 * 3.  Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int bufsiz;
	kxdrproc_t encode;
	u32 *p;

	dprintk("RPC: %4d call_encode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len = bufsiz;
	sndbuf->tail[0].iov_len = 0;
	sndbuf->page_len = 0;
	sndbuf->len = 0;
	sndbuf->buflen = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len = bufsiz;
	rcvbuf->tail[0].iov_len = 0;
	rcvbuf->page_len = 0;
	rcvbuf->len = 0;
	rcvbuf->buflen = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}

/*
 * 4.  Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a. Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
				task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b. Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c. Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}

/*
 * 5.  Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}
/*
 * 6.  Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	int status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
			task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		if (clnt->cl_autobind)
			clnt->cl_port = 0;
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a. Handle transmission errors.
 */
static void
call_transmit_status(struct rpc_task *task)
{
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
	call_status(task);
}

/*
 * 6b. Handle RPC timeout
 *     We do not release the request slot, so we keep using the
 *     same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
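/*
 * Aside (general ONC RPC background, not from the original file):
 * keeping the same XID across retransmits is what allows a server's
 * duplicate request cache to recognize a retransmission of a
 * non-idempotent operation and replay the cached reply rather than
 * executing the operation a second time.
 */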
/*
 * 7.  Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
	u32 *p;

	dprintk("RPC: %4d call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.  Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a. Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}
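/*
 * For reference (summary per RFC 1831, not part of the original file),
 * call_header() below marshals the call header as consecutive 32-bit
 * XDR words:
 *
 *	xid | msg_type = CALL | rpcvers = 2 | prog | vers | proc
 *	    | credential (flavor, length, body)
 *	    | verifier   (flavor, length, body)
 *
 * with the credential and verifier filled in by rpcauth_marshcred().
 */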
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 *p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
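/*
 * The reply walked by call_verify() below has the matching shape
 * (summary per RFC 1831, not part of the original file). An accepted
 * reply is:
 *
 *	xid | msg_type = REPLY | MSG_ACCEPTED | verifier | accept_stat
 *
 * while a denied reply carries a reject_stat (and auth_stat) instead of
 * the verifier and accept_stat. Only on RPC_SUCCESS is the position
 * just past accept_stat handed back to the procedure's decode function;
 * every other branch retries or exits the task.
 */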
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32 *p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
					task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
					task->tk_pid);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
				task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;	/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;	/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}