xprt.c revision 7c1d71cf56feebfb5b98219b9d11dfc3a2feca62
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
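
/*
 * A note on the fixed-point representation above (illustrative, not
 * part of the algorithm itself): one "request's worth" of window is
 * RPC_CWNDSCALE = 256, so RPC_INITCWND starts a transport at exactly
 * one request in flight. Each request holding a congestion slot adds
 * RPC_CWNDSCALE to xprt->cong, and RPCXPRT_CONGESTED() fires once
 * cong reaches cwnd: with cwnd = 512, a third concurrent request
 * would have to wait until a slot is released or the window grows.
 */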

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	result = -EINVAL;
	if (try_module_get(THIS_MODULE)) {
		list_add_tail(&transport->list, &xprt_list);
		printk(KERN_INFO "RPC: Registered %s transport module.\n",
			transport->name);
		result = 0;
	}

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			module_put(THIS_MODULE);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
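
/*
 * Illustrative sketch (not compiled): how a hypothetical transport
 * module might use the two calls above. "example_transport",
 * example_setup() and the ident value are assumptions for the sake
 * of the example, not real symbols; compare xprtsock.c for a real
 * registration.
 */
#if 0
static struct xprt_class example_transport = {
	.list		= LIST_HEAD_INIT(example_transport.list),
	.name		= "example",
	.owner		= THIS_MODULE,
	.ident		= 42,			/* hypothetical transport ident */
	.setup		= example_setup,	/* hypothetical setup callback */
};

static int __init example_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_exit(void)
{
	xprt_unregister_transport(&example_transport);
}
#endif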

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		if (task == NULL)
			return 0;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
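
/*
 * A transport chooses between the plain and the congestion-controlled
 * variants through its rpc_xprt_ops. Illustrative sketch (not
 * compiled); compare the datagram ops table in xprtsock.c, which pairs
 * the *_cong variants, while the stream transport uses the plain ones:
 */
#if 0
static struct rpc_xprt_ops example_datagram_ops = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	/* ... remaining methods elided ... */
};
#endif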

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
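
/*
 * Worked example for the additive increase above (illustrative
 * numbers): with cwnd = 2 * RPC_CWNDSCALE = 512, the increment is
 * (256 * 256 + 256) / 512 = 128, i.e. half a request's worth, so the
 * window needs roughly cwnd/RPC_CWNDSCALE successful replies to grow
 * by one full request. The (cwnd >> 1) term rounds to nearest rather
 * than truncating. On -ETIMEDOUT the window is halved but never drops
 * below one request (RPC_CWNDSCALE).
 */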

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
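
/*
 * Illustrative numbers for the RTT-based case above: if the estimator
 * returns an RTO of HZ/4 and the sum of recorded timeouts and
 * rq_retries is 2, then tk_timeout = (HZ/4) << 2 = HZ; the result is
 * always clamped to the client's to_maxval. Which of the two helpers
 * a transport uses is again an rpc_xprt_ops choice
 * (->set_retrans_timeout).
 */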

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
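
/*
 * Worked example (illustrative values): with to_initval = 5 * HZ,
 * to_retries = 3 and to_exponential set, xprt_reset_majortimeo()
 * allows minor timeouts of 5s, 10s and 20s before the major timeout
 * (5 << 3 = 40s from now, capped at to_maxval) expires and
 * xprt_adjust_timeout() returns -ETIMEDOUT, restarting from
 * to_initval with the RTT state reset.
 */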

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
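
/*
 * Illustrative use of xprt_conditional_disconnect() above (not
 * compiled): a hypothetical retransmit path passes the cookie it
 * recorded at transmit time (see xprt_transmit(), which saves
 * xprt->connect_cookie in rq_connect_cookie), so only the first of a
 * batch of timed-out requests actually breaks the connection:
 */
#if 0
	xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie);
#endif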
"is" : "is not")); 668 669 if (!xprt_bound(xprt)) { 670 task->tk_status = -EIO; 671 return; 672 } 673 if (!xprt_lock_write(xprt, task)) 674 return; 675 if (xprt_connected(xprt)) 676 xprt_release_write(xprt, task); 677 else { 678 if (task->tk_rqstp) 679 task->tk_rqstp->rq_bytes_sent = 0; 680 681 task->tk_timeout = xprt->connect_timeout; 682 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); 683 xprt->stat.connect_start = jiffies; 684 xprt->ops->connect(task); 685 } 686 return; 687} 688 689static void xprt_connect_status(struct rpc_task *task) 690{ 691 struct rpc_xprt *xprt = task->tk_xprt; 692 693 if (task->tk_status >= 0) { 694 xprt->stat.connect_count++; 695 xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; 696 dprintk("RPC: %5u xprt_connect_status: connection established\n", 697 task->tk_pid); 698 return; 699 } 700 701 switch (task->tk_status) { 702 case -ECONNREFUSED: 703 case -ECONNRESET: 704 dprintk("RPC: %5u xprt_connect_status: server %s refused " 705 "connection\n", task->tk_pid, 706 task->tk_client->cl_server); 707 break; 708 case -ENOTCONN: 709 dprintk("RPC: %5u xprt_connect_status: connection broken\n", 710 task->tk_pid); 711 break; 712 case -ETIMEDOUT: 713 dprintk("RPC: %5u xprt_connect_status: connect attempt timed " 714 "out\n", task->tk_pid); 715 break; 716 default: 717 dprintk("RPC: %5u xprt_connect_status: error %d connecting to " 718 "server %s\n", task->tk_pid, -task->tk_status, 719 task->tk_client->cl_server); 720 xprt_release_write(xprt, task); 721 task->tk_status = -EIO; 722 } 723} 724 725/** 726 * xprt_lookup_rqst - find an RPC request corresponding to an XID 727 * @xprt: transport on which the original request was transmitted 728 * @xid: RPC XID of incoming reply 729 * 730 */ 731struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) 732{ 733 struct list_head *pos; 734 735 list_for_each(pos, &xprt->recv) { 736 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); 737 if (entry->rq_xid == xid) 738 return entry; 739 } 740 741 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", 742 ntohl(xid)); 743 xprt->stat.bad_xids++; 744 return NULL; 745} 746EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 747 748/** 749 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply 750 * @task: RPC request that recently completed 751 * 752 */ 753void xprt_update_rtt(struct rpc_task *task) 754{ 755 struct rpc_rqst *req = task->tk_rqstp; 756 struct rpc_rtt *rtt = task->tk_client->cl_rtt; 757 unsigned timer = task->tk_msg.rpc_proc->p_timer; 758 759 if (timer) { 760 if (req->rq_ntrans == 1) 761 rpc_update_rtt(rtt, timer, 762 (long)jiffies - req->rq_xtime); 763 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 764 } 765} 766EXPORT_SYMBOL_GPL(xprt_update_rtt); 767 768/** 769 * xprt_complete_rqst - called when reply processing is complete 770 * @task: RPC request that recently completed 771 * @copied: actual number of bytes received from the transport 772 * 773 * Caller holds transport lock. 

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}

	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_xprt, task);
}
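
/*
 * How the RPC client state machine is expected to drive the transmit
 * entry points in this file (illustrative sketch, not compiled;
 * compare call_transmit() in clnt.c): the transport is reserved
 * first, the request is sent via xprt_transmit() below, and the write
 * lock is dropped afterwards.
 */
#if 0
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* ... encode the RPC message ... */
	xprt_transmit(task);
	/* ... */
	xprt_end_transmit(task);
#endif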

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %5u xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 *	 hence there is no danger of the waking up task being put on
	 *	 schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;
	if (status == -ECONNREFUSED)
		rpc_sleep_on(&xprt->sending, task, NULL);
}

static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
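
/*
 * Illustrative creation sketch for xprt_create_transport() below (not
 * compiled; the real caller lives in clnt.c). Only the ident field is
 * matched against the registered xprt_class list here; the peer
 * address fields are deliberately elided:
 */
#if 0
	struct xprt_create xprtargs = {
		.ident	= 42,	/* must match a registered xprt_class */
		/* ... peer address fields elided ... */
	};

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);
#endif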

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	setup_timer(&xprt->timer, xprt_init_autodisconnect,
			(unsigned long)xprt);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC: destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}