sock.c revision 43815482370c510c569fd18edb57afcb0fa8cab6
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154",
	"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154",
	"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154",
	"clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters.
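 * These limits are exported via sysctl as /proc/sys/net/core/wmem_max,
 * rmem_max, wmem_default and rmem_default, and cap what the
 * SO_SNDBUF/SO_RCVBUF handling below may grant.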
 */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
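	 * (Reading skb->len after __skb_queue_tail() below could race
	 * with another thread freeing the skb, hence the local copy.)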
	 */
	skb_len = skb->len;

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
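 *
 *	For orientation, this is the kernel-side entry point for a plain
 *	user-space call such as (illustrative only):
 *
 *		int one = 1;
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));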
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
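		 *
		 * Worked example: a process requesting a 64 KiB receive
		 * buffer (val == 65536) ends up with sk_rcvbuf == 131072,
		 * and getsockopt(SO_RCVBUF) reports 131072 as well.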
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement SO_SNDLOWAT etc. as not settable
		   (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
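 * (Concretely, sock_copy() below copies everything from sk_copy_start
 * onwards; sk_node, sk_refcnt and sk_tx_queue_mapping, which precede
 * it, are left untouched.)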
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified. Special care is taken
			 * when initializing object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

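	/* Release the namespace reference taken in sk_alloc() and then
	 * free the sock storage itself.
	 */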
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child was always incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
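 * (When force is nonzero the sk_sndbuf accounting check below is
 * bypassed, so the allocation can only fail in alloc_skb() itself.)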
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done...
				 */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
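 *
 * Returns the value of the wait condition, i.e. nonzero if the
 * receive queue is non-empty once the wait finishes.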
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}
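
/*
 * The default callbacks above and below all follow the same pattern:
 * the socket wait queue is read under rcu_read_lock(), and
 * wq_has_sleeper() pairs with the barriers in the wait/wake code so
 * that a sleeper either observes the event or is observed by the
 * waker before the wake-up is skipped.
 */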

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peercred.pid	=	0;
	sk->sk_peercred.uid	=	-1;
	sk->sk_peercred.gid	=	-1;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!sock_flag(sk,
				flag == SOCK_TIMESTAMP ?
				SOCK_TIMESTAMPING_RX_SOFTWARE :
				SOCK_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
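 *
 *	(User-space view, illustrative: after a failed non-blocking
 *	connect(), the pending error is typically collected with
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len).)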

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on a socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
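
/*
 * Example (illustrative sketch, not taken from this file): a protocol
 * family that keeps all option and receive handling in its struct proto
 * can plug the generic wrappers above straight into its struct proto_ops
 * instead of writing per-family forwarders. "my_stream_ops" and
 * "PF_MYPROTO" are invented names:
 *
 *	static const struct proto_ops my_stream_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *		...
 *	};
 */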

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	int cpu = smp_processor_id();
	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
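
/*
 * Example (illustrative sketch, not taken from this file): the per-protocol
 * in-use counters are typically maintained from a protocol's hash/unhash
 * hooks, adding +1 when a socket enters the lookup tables and -1 when it
 * leaves, so sock_prot_inuse_get() can sum them for /proc reporting. Since
 * sock_prot_inuse_add() uses smp_processor_id(), callers must not be
 * preemptible (in practice they run with bottom halves disabled).
 * "my_proto_hash"/"my_proto_unhash" are hypothetical:
 *
 *	static void my_proto_hash(struct sock *sk)
 *	{
 *		... insert sk into the lookup table ...
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 *	}
 *
 *	static void my_proto_unhash(struct sock *sk)
 *	{
 *		... remove sk from the lookup table ...
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 *	}
 */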

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
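
/*
 * Example (illustrative sketch, not taken from this file): a protocol
 * module pairs proto_register() with proto_unregister(); passing
 * alloc_slab = 1 asks the core to create a kmem cache of obj_size bytes
 * for the protocol's sockets. "my_proto" and "struct my_sock" are
 * invented names:
 *
 *	struct my_sock {
 *		struct sock sk;		must be the first member
 *		... protocol private state ...
 *	};
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	in module init
 *	proto_unregister(&my_proto);		in module exit
 */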

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}
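
/*
 * Note (descriptive, derived from the code above): the "cl co di ac io in
 * de sh ss gs se re sp bi br ha uh gp em" header printed by
 * proto_seq_show() marks with 'y' or 'n' which struct proto methods each
 * protocol implements, in the order used by proto_seq_printf(): close,
 * connect, disconnect, accept, ioctl, init, destroy, shutdown, setsockopt,
 * getsockopt, sendmsg, recvmsg, sendpage, bind, backlog_rcv, hash, unhash,
 * get_port and enter_memory_pressure. Reading /proc/net/protocols in a
 * given net namespace walks proto_list under proto_list_lock via the
 * seq_file operations above.
 */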

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */