svc_xprt.c revision 03cf6c9f49a8fea953d38648d016e3f46e814991
/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#define SVC_MAX_WAKING 5

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiply. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
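
/*
 * Illustrative sketch (not part of this file): a transport provider's
 * data-ready callback would follow the XPT_DATA rule above -- set the
 * bit, then enqueue the transport.  The callback name and how it gets
 * hooked up are hypothetical; real providers (e.g. svcsock.c) wire
 * this into their own sk_data_ready handlers.
 *
 *	static void example_data_ready(struct svc_xprt *xprt)
 *	{
 *		set_bit(XPT_DATA, &xprt->xpt_flags);
 *		svc_xprt_enqueue(xprt);
 *	}
 */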

int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct list_head *le;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each(le, &svc_xprt_class_list) {
		int slen;
		struct svc_xprt_class *xcl =
			list_entry(le, struct svc_xprt_class, xcl_list);

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
	    && xprt->xpt_auth_cache != NULL)
		svcauth_unix_info_release(xprt->xpt_auth_cache);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);
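
/*
 * Illustrative sketch (not part of this file): a transport module
 * registers its class at init time and unregisters it on exit.  The
 * structure and function names below are hypothetical; svcsock.c and
 * the svcrdma module are the in-tree callers of this pair.
 *
 *	static struct svc_xprt_class example_xprt_class = {
 *		.xcl_name	 = "example",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &example_xprt_ops,
 *		.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return svc_reg_xprt_class(&example_xprt_class);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		svc_unreg_xprt_class(&example_xprt_class);
 *	}
 */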

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
		   struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 unsigned short port, int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	struct sockaddr *sap;
	size_t len;

	switch (serv->sv_family) {
	case AF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
	case AF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
}

int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
		    int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return svc_xprt_local_port(newxprt);
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
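
/*
 * Illustrative sketch (not part of this file): a service sets up a
 * listener by class name, in the style of lockd and nfsd.  On success
 * the return value is the bound local port; the surrounding error
 * handling is a minimal sketch.
 *
 *	int err = svc_create_xprt(serv, "tcp", port, SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		return err;
 */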

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct sockaddr *sin;

	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	sin = (struct sockaddr *)&xprt->xpt_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
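
/*
 * Illustrative sketch (not part of this file): callers of
 * svc_print_addr() above format the peer address into a stack buffer,
 * typically for log messages; RPC_MAX_ADDRBUFLEN is the conventional
 * buffer size for this.
 *
 *	char buf[RPC_MAX_ADDRBUFLEN];
 *
 *	dprintk("svc: request from %s\n",
 *		svc_print_addr(rqstp, buf, sizeof(buf)));
 */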

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;
	int thread_avail;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		/* Don't enqueue dead transports */
		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
		goto out_unlock;
	}

	pool->sp_stats.packets++;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	/* Work out whether threads are available */
	thread_avail = !list_empty(&pool->sp_threads);	/* threads are asleep */
	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
		/* too many threads are runnable and trying to wake up */
		thread_avail = 0;
		pool->sp_stats.overloads_avoided++;
	}

	if (thread_avail) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		rqstp->rq_waking = 1;
		pool->sp_nwaking++;
		pool->sp_stats.threads_woken++;
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport.  Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
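
/*
 * Illustrative sketch (not part of this file): a provider's
 * xpo_recvfrom implementation hands XPT_BUSY back via
 * svc_xprt_received() once it has pulled a request off the transport,
 * so the transport can be re-enqueued if more data is pending.  The
 * function name and the elided receive step are hypothetical; the
 * svcsock.c recvfrom methods follow this shape.
 *
 *	static int example_recvfrom(struct svc_rqst *rqstp)
 *	{
 *		struct svc_xprt *xprt = rqstp->rq_xprt;
 *		int len;
 *
 *		len = ...;			(pull one request)
 *		if (len == 0)
 *			clear_bit(XPT_DATA, &xprt->xpt_flags);
 *		svc_xprt_received(xprt);	(reschedule the xprt)
 *		return len;
 *	}
 */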

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
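
/*
 * Illustrative sketch (not part of this file): after parsing a READ
 * request a service knows the reply cannot exceed the requested byte
 * count plus a little header room, so it shrinks its reservation from
 * the initial sv_max_mesg via svc_reserve() above.  The "count"
 * variable and the headroom constant are hypothetical placeholders
 * for what nfsd-style callers compute.
 *
 *	svc_reserve(rqstp, count + 64);
 *
 * Shrinking the reservation frees write space on the transport, which
 * is why svc_reserve() re-runs svc_xprt_enqueue() on it.
 */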

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open "
				       "connections, consider increasing %s\n",
				       serv->sv_name, serv->sv_maxconn ?
				       "the max number of connections." :
				       "the number of threads.");
			}
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}
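
/*
 * Worked example of the default limit above: a service running 8
 * threads with no sv_maxconn override tolerates (8 + 3) * 20 = 220
 * temporary (connected) transports before it starts closing the
 * oldest one.
 */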

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;
	int len, i;
	int pages;
	struct xdr_buf *arg;
	DECLARE_WAITQUEUE(wait, current);
	long time_left;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if (rqstp->rq_waking) {
		rqstp->rq_waking = 0;
		pool->sp_nwaking--;
		BUG_ON(pool->sp_nwaking < 0);
	}
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * checking kthread_should_stop() here allows us to avoid
		 * locking and signalling when stopping kthreads that call
		 * svc_recv. If the thread has already been woken up, then
		 * we can exit here without sleeping. If not, then it'll be
		 * woken up quickly during the schedule_timeout.
		 */
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			spin_unlock_bh(&pool->sp_lock);
			return -EINTR;
		}

		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		time_left = schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);
		if (!time_left)
			pool->sp_stats.threads_timedout++;

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			if (signalled() || kthread_should_stop())
				return -EINTR;
			else
				return -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(xprt->xpt_server);
			spin_lock_bh(&serv->sv_lock);
			set_bit(XPT_TEMP, &newxpt->xpt_flags);
			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
			serv->sv_tmpcnt++;
			if (serv->sv_temptimer.function == NULL) {
				/* setup timer to age temp transports */
				setup_timer(&serv->sv_temptimer,
					    svc_age_temp_xprts,
					    (unsigned long)serv);
				mod_timer(&serv->sv_temptimer,
					  jiffies + svc_conn_age_period * HZ);
			}
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(newxpt);
		}
		svc_xprt_received(xprt);
	} else {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred) {
			svc_xprt_received(xprt);
			len = svc_deferred_recv(rqstp);
		} else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_xprt_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
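
/*
 * Illustrative sketch (not part of this file): the canonical caller of
 * svc_recv() is a service thread loop in the style of nfsd()/lockd(),
 * repeatedly pulling requests and dispatching them.  The timeout value
 * and error handling are a minimal sketch.
 *
 *	for (;;) {
 *		long timeout = MAX_SCHEDULE_TIMEOUT;
 *		int err = svc_recv(rqstp, timeout);
 *
 *		if (err == -EAGAIN)
 *			continue;	(no complete request yet)
 *		if (err == -EINTR)
 *			break;		(signalled or kthread stopping)
 *		svc_process(rqstp);	(parse, dispatch, and svc_send)
 *	}
 */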

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
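
/*
 * Worked example of the mark-and-sweep aging above: with
 * svc_conn_age_period at 6*60 seconds, a connection that goes quiet at
 * t=0 is marked XPT_OLD by the timer run at (up to) t=360s and closed
 * by the run at t=720s, so an idle temporary transport survives for
 * roughly 6 to 12 minutes.  Any request activity in between clears
 * XPT_OLD (see svc_recv) and restarts the cycle.
 */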

/*
 * Remove a dead transport
 */
void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its sk_xprt.xpt_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;

	for (dr = svc_deferred_dequeue(xprt); dr;
	     dr = svc_deferred_dequeue(xprt)) {
		svc_xprt_put(xprt);
		kfree(dr);
	}

	svc_xprt_put(xprt);
	spin_unlock_bh(&serv->sv_lock);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(xprt);
	svc_delete_xprt(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * so just remove it from the waiting list
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
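
/*
 * Sketch of the deferral lifecycle implemented by svc_revisit() above
 * and svc_defer()/svc_deferred_recv() below: a cache miss during
 * request processing calls rq_chandle.defer (svc_defer), which copies
 * the request into a svc_deferred_req so the thread can move on.
 * When the cache item is later filled in, the cache layer calls the
 * saved handle.revisit (svc_revisit), which moves the saved request
 * onto xpt_deferred and re-enqueues the transport; the next svc_recv()
 * on that transport replays it via svc_deferred_recv() instead of
 * reading from the wire.
 */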

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	}
	spin_unlock(&xprt->xpt_lock);
	return dr;
}
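
/*
 * Worked example of the word-based bookkeeping above: a UDP request of
 * rq_arg.len = 120 bytes with no transport header (rq_xprt_hlen = 0)
 * is stored with dr->argslen = 120 >> 2 = 30 four-byte words, and
 * svc_deferred_recv() later reports (30 << 2) - 0 = 120 bytes again.
 * The shifts assume the lengths are multiples of 4, which XDR's
 * four-byte alignment guarantees for RPC data.
 */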

/*
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, char *xcl_name,
			       int af, int port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (!serv || !xcl_name)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

/*
 * Format a buffer with a list of the active transports. A zero for
 * the buflen parameter disables target buffer overflow checking.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
{
	struct svc_xprt *xprt;
	char xprt_str[64];
	int totlen = 0;
	int len;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = snprintf(xprt_str, sizeof(xprt_str),
			       "%s %d\n", xprt->xpt_class->xcl_name,
			       svc_xprt_local_port(xprt));
		/* If the string was truncated, replace with error string */
		if (len >= sizeof(xprt_str))
			strcpy(xprt_str, "name-too-long\n");
		/* Don't overflow buffer */
		len = strlen(xprt_str);
		if (buflen && (len + totlen >= buflen))
			break;
		strcpy(buf+totlen, xprt_str);
		totlen += len;
	}
	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
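
/*
 * Illustrative sketch (not part of this file): a caller looks up the
 * TCP listener regardless of family or port, and must drop the
 * reference that svc_find_xprt() takes on a successful match.
 *
 *	struct svc_xprt *xprt = svc_find_xprt(serv, "tcp", 0, 0);
 *	if (xprt) {
 *		...			(inspect or use the transport)
 *		svc_xprt_put(xprt);
 *	}
 */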

/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	lock_kernel();
	/* bump up the pseudo refcount while traversing */
	svc_get(serv);
	unlock_kernel();

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
	struct svc_serv *serv = m->private;

	lock_kernel();
	/* this function really, really should have been called svc_put() */
	svc_destroy(serv);
	unlock_kernel();
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
		pool->sp_id,
		pool->sp_stats.packets,
		pool->sp_stats.sockets_queued,
		pool->sp_stats.threads_woken,
		pool->sp_stats.overloads_avoided,
		pool->sp_stats.threads_timedout);

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/
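
/*
 * Illustrative sketch (not part of this file): a service exposes the
 * per-pool stats by wiring svc_pool_stats_open() into the open method
 * of a procfs file, in the style of nfsd's pool_stats file.  The outer
 * open function and the way the svc_serv pointer is found are
 * hypothetical.
 *
 *	static int example_pool_stats_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		return svc_pool_stats_open(example_serv, file);
 *	}
 *
 *	static const struct file_operations example_pool_stats_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_pool_stats_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */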