sock.c revision f845172531fb7410c7fb7780b1a6e51ee6df7d52
/*
 * INET     An implementation of the TCP/IP protocol suite for the LINUX
 *          operating system.  INET is implemented using the BSD Socket
 *          interface as the means of communication with the user level.
 *
 *          Generic socket support routines. Memory allocators, socket lock/release
 *          handler for protocols to use and generic option handler.
 *
 * Authors: Ross Biro
 *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *          Florian La Roche, <flla@stud.uni-sb.de>
 *          Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *          Alan Cox        : Numerous verify_area() problems
 *          Alan Cox        : Connecting on a connecting socket
 *                            now returns an error for tcp.
 *          Alan Cox        : sock->protocol is set correctly,
 *                            and is not sometimes left as 0.
 *          Alan Cox        : connect handles icmp errors on a
 *                            connect properly. Unfortunately there
 *                            is a restart syscall nasty there. I
 *                            can't match BSD without hacking the C
 *                            library. Ideas urgently sought!
 *          Alan Cox        : Disallow bind() to addresses that are
 *                            not ours - especially broadcast ones!!
 *          Alan Cox        : Socket 1024 _IS_ ok for users. (fencepost)
 *          Alan Cox        : sock_wfree/sock_rfree don't destroy sockets,
 *                            instead they leave that for the DESTROY timer.
 *          Alan Cox        : Clean up error flag in accept
 *          Alan Cox        : TCP ack handling is buggy, the DESTROY timer
 *                            was buggy. Put a remove_sock() in the handler
 *                            for memory when we hit 0. Also altered the timer
 *                            code. The ACK stuff can wait and needs major
 *                            TCP layer surgery.
 *          Alan Cox        : Fixed TCP ack bug, removed remove sock
 *                            and fixed timer/inet_bh race.
 *          Alan Cox        : Added zapped flag for TCP
 *          Alan Cox        : Move kfree_skb into skbuff.c and tidied up surplus code
 *          Alan Cox        : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *          Alan Cox        : kfree_s calls now are kfree_skbmem so we can track skb resources
 *          Alan Cox        : Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *          Alan Cox        : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *          Rick Sladkey    : Relaxed UDP rules for matching packets.
 *          C.E.Hawkins     : IFF_PROMISC/SIOCGHWADDR support
 *          Pauline Middelink : identd support
 *          Alan Cox        : Fixed connect() taking signals I think.
 *          Alan Cox        : SO_LINGER supported
 *          Alan Cox        : Error reporting fixes
 *          Anonymous       : inet_create tidied up (sk->reuse setting)
 *          Alan Cox        : inet sockets don't set sk->type!
 *          Alan Cox        : Split socket option code
 *          Alan Cox        : Callbacks
 *          Alan Cox        : Nagle flag for Charles & Johannes stuff
 *          Alex            : Removed restriction on inet fioctl
 *          Alan Cox        : Splitting INET from NET core
 *          Alan Cox        : Fixed bogus SO_TYPE handling in getsockopt()
 *          Adam Caldwell   : Missing return in SO_DONTROUTE/SO_DEBUG code
 *          Alan Cox        : Split IP from generic code
 *          Alan Cox        : New kfree_skbmem()
 *          Alan Cox        : Make SO_DEBUG superuser only.
 *          Alan Cox        : Allow anyone to clear SO_DEBUG
 *                            (compatibility fix)
 *          Alan Cox        : Added optimistic memory grabbing for AF_UNIX throughput.
 *          Alan Cox        : Allocator for a socket is settable.
 *          Alan Cox        : SO_ERROR includes soft errors.
 *          Alan Cox        : Allow NULL arguments on some SO_ opts
 *          Alan Cox        : Generic socket allocation to make hooks
 *                            easier (suggested by Craig Metz).
 *          Michael Pall    : SO_ERROR returns positive errno again
 *          Steve Whitehouse: Added default destructor to free
 *                            protocol private data.
 *          Steve Whitehouse: Added various other default routines
 *                            common to several socket families.
 *          Chris Evans     : Call suser() check last on F_SETOWN
 *          Jay Schulist    : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *          Andi Kleen      : Add sock_kmalloc()/sock_kfree_s()
 *          Andi Kleen      : Fix write_space callback
 *          Chris Evans     : Security fixes - signedness again
 *          Arnaldo C. Melo : cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
/*
 * Make lock validator output more readable (we pre-construct these
 * strings build-time, so that runtime initialization of socket locks
 * is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
    "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
    "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
    "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
    "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
    "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
    "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
    "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
    "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
    "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
    "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
    "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
    "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
    "sk_lock-AF_IEEE802154",
    "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
    "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
    "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
    "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
    "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
    "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
    "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
    "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
    "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
    "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
    "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
    "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
    "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
    "slock-AF_IEEE802154",
    "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
    "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
    "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
    "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
    "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
    "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
    "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
    "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
    "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
    "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
    "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
    "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
    "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
    "clock-AF_IEEE802154",
    "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS     256
#define _SK_MEM_OVERHEAD    (sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX         (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX         (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
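
/*
 * Illustrative arithmetic, not from the original source: on a platform
 * where sizeof(struct sk_buff) happens to be 256 bytes, _SK_MEM_OVERHEAD
 * is 256 + 256 = 512 bytes, so SK_WMEM_MAX and SK_RMEM_MAX both come out
 * at 512 * 256 = 128 KiB of default buffering per socket.  Folding the
 * sk_buff overhead into the constant is what keeps queueing behaviour
 * comparable across platforms with different sk_buff sizes.
 */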

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
    struct timeval tv;

    if (optlen < sizeof(tv))
        return -EINVAL;
    if (copy_from_user(&tv, optval, sizeof(tv)))
        return -EFAULT;
    if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
        return -EDOM;

    if (tv.tv_sec < 0) {
        static int warned __read_mostly;

        *timeo_p = 0;
        if (warned < 10 && net_ratelimit()) {
            warned++;
            printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
                   "tries to set negative timeout\n",
                   current->comm, task_pid_nr(current));
        }
        return 0;
    }
    *timeo_p = MAX_SCHEDULE_TIMEOUT;
    if (tv.tv_sec == 0 && tv.tv_usec == 0)
        return 0;
    if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
        *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
    return 0;
}
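
/*
 * Worked example for the conversion above (illustrative): with HZ == 1000,
 * a timeval of { .tv_sec = 1, .tv_usec = 500000 } becomes
 * 1 * 1000 + (500000 + 999) / 1000 = 1500 jiffies.  The rounding term
 * (1000000/HZ - 1) ensures that any nonzero microsecond remainder rounds
 * up to a whole jiffy rather than being silently dropped.
 */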

static void sock_warn_obsolete_bsdism(const char *name)
{
    static int warned;
    static char warncomm[TASK_COMM_LEN];
    if (strcmp(warncomm, current->comm) && warned < 5) {
        strcpy(warncomm, current->comm);
        printk(KERN_WARNING "process `%s' is using obsolete "
               "%s SO_BSDCOMPAT\n", warncomm, name);
        warned++;
    }
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
    if (sock_flag(sk, flag)) {
        sock_reset_flag(sk, flag);
        if (!sock_flag(sk, SOCK_TIMESTAMP) &&
            !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
            net_disable_timestamp();
        }
    }
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
    int err;
    int skb_len;
    unsigned long flags;
    struct sk_buff_head *list = &sk->sk_receive_queue;

    /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces the
       number of warnings when compiling with -W --ANK
     */
    if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
        (unsigned)sk->sk_rcvbuf) {
        atomic_inc(&sk->sk_drops);
        return -ENOMEM;
    }

    err = sk_filter(sk, skb);
    if (err)
        return err;

    if (!sk_rmem_schedule(sk, skb->truesize)) {
        atomic_inc(&sk->sk_drops);
        return -ENOBUFS;
    }

    skb->dev = NULL;
    skb_set_owner_r(skb, sk);

    /* Cache the SKB length before we tack it onto the receive
     * queue.  Once it is added it no longer belongs to us and
     * may be freed by other threads of control pulling packets
     * from the queue.
     */
    skb_len = skb->len;

    /* We escape from the RCU-protected region, so make sure we
     * don't leak a non-refcounted dst.
     */
    skb_dst_force(skb);

    spin_lock_irqsave(&list->lock, flags);
    skb->dropcount = atomic_read(&sk->sk_drops);
    __skb_queue_tail(list, skb);
    spin_unlock_irqrestore(&list->lock, flags);

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk, skb_len);
    return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
    int rc = NET_RX_SUCCESS;

    if (sk_filter(sk, skb))
        goto discard_and_relse;

    skb->dev = NULL;

    if (sk_rcvqueues_full(sk, skb)) {
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;
    }
    if (nested)
        bh_lock_sock_nested(sk);
    else
        bh_lock_sock(sk);
    if (!sock_owned_by_user(sk)) {
        /*
         * trylock + unlock semantics:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

        rc = sk_backlog_rcv(sk, skb);

        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
    } else if (sk_add_backlog(sk, skb)) {
        bh_unlock_sock(sk);
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;
    }

    bh_unlock_sock(sk);
out:
    sock_put(sk);
    return rc;
discard_and_relse:
    kfree_skb(skb);
    goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
    sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = __sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
        sk_tx_queue_clear(sk);
        rcu_assign_pointer(sk->sk_dst_cache, NULL);
        dst_release(dst);
        return NULL;
    }

    return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
        sk_dst_reset(sk);
        dst_release(dst);
        return NULL;
    }

    return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
    int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
    struct net *net = sock_net(sk);
    char devname[IFNAMSIZ];
    int index;

    /* Sorry... */
    ret = -EPERM;
    if (!capable(CAP_NET_RAW))
        goto out;

    ret = -EINVAL;
    if (optlen < 0)
        goto out;

    /* Bind this socket to a particular device like "eth0",
     * as specified in the passed interface name.  If the
     * name is "" or the option length is zero the socket
     * is not bound.
     */
    if (optlen > IFNAMSIZ - 1)
        optlen = IFNAMSIZ - 1;
    memset(devname, 0, sizeof(devname));

    ret = -EFAULT;
    if (copy_from_user(devname, optval, optlen))
        goto out;

    index = 0;
    if (devname[0] != '\0') {
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, devname);
        if (dev)
            index = dev->ifindex;
        rcu_read_unlock();
        ret = -ENODEV;
        if (!dev)
            goto out;
    }

    lock_sock(sk);
    sk->sk_bound_dev_if = index;
    sk_dst_reset(sk);
    release_sock(sk);

    ret = 0;

out:
#endif

    return ret;
}
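
/*
 * Typical use from userspace (illustrative, not part of this file):
 *
 *     setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *                "eth0", strlen("eth0") + 1);
 *
 * The caller needs CAP_NET_RAW; passing an empty interface name or a
 * zero option length removes the binding again.
 */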
430 */ 431 if (optlen > IFNAMSIZ - 1) 432 optlen = IFNAMSIZ - 1; 433 memset(devname, 0, sizeof(devname)); 434 435 ret = -EFAULT; 436 if (copy_from_user(devname, optval, optlen)) 437 goto out; 438 439 index = 0; 440 if (devname[0] != '\0') { 441 struct net_device *dev; 442 443 rcu_read_lock(); 444 dev = dev_get_by_name_rcu(net, devname); 445 if (dev) 446 index = dev->ifindex; 447 rcu_read_unlock(); 448 ret = -ENODEV; 449 if (!dev) 450 goto out; 451 } 452 453 lock_sock(sk); 454 sk->sk_bound_dev_if = index; 455 sk_dst_reset(sk); 456 release_sock(sk); 457 458 ret = 0; 459 460out: 461#endif 462 463 return ret; 464} 465 466static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) 467{ 468 if (valbool) 469 sock_set_flag(sk, bit); 470 else 471 sock_reset_flag(sk, bit); 472} 473 474/* 475 * This is meant for all protocols to use and covers goings on 476 * at the socket level. Everything here is generic. 477 */ 478 479int sock_setsockopt(struct socket *sock, int level, int optname, 480 char __user *optval, unsigned int optlen) 481{ 482 struct sock *sk = sock->sk; 483 int val; 484 int valbool; 485 struct linger ling; 486 int ret = 0; 487 488 /* 489 * Options without arguments 490 */ 491 492 if (optname == SO_BINDTODEVICE) 493 return sock_bindtodevice(sk, optval, optlen); 494 495 if (optlen < sizeof(int)) 496 return -EINVAL; 497 498 if (get_user(val, (int __user *)optval)) 499 return -EFAULT; 500 501 valbool = val ? 1 : 0; 502 503 lock_sock(sk); 504 505 switch (optname) { 506 case SO_DEBUG: 507 if (val && !capable(CAP_NET_ADMIN)) 508 ret = -EACCES; 509 else 510 sock_valbool_flag(sk, SOCK_DBG, valbool); 511 break; 512 case SO_REUSEADDR: 513 sk->sk_reuse = valbool; 514 break; 515 case SO_TYPE: 516 case SO_PROTOCOL: 517 case SO_DOMAIN: 518 case SO_ERROR: 519 ret = -ENOPROTOOPT; 520 break; 521 case SO_DONTROUTE: 522 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 523 break; 524 case SO_BROADCAST: 525 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 526 break; 527 case SO_SNDBUF: 528 /* Don't error on this BSD doesn't and if you think 529 about it this is right. Otherwise apps have to 530 play 'guess the biggest size' games. RCVBUF/SNDBUF 531 are treated in BSD as hints */ 532 533 if (val > sysctl_wmem_max) 534 val = sysctl_wmem_max; 535set_sndbuf: 536 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 537 if ((val * 2) < SOCK_MIN_SNDBUF) 538 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 539 else 540 sk->sk_sndbuf = val * 2; 541 542 /* 543 * Wake up sending tasks if we 544 * upped the value. 545 */ 546 sk->sk_write_space(sk); 547 break; 548 549 case SO_SNDBUFFORCE: 550 if (!capable(CAP_NET_ADMIN)) { 551 ret = -EPERM; 552 break; 553 } 554 goto set_sndbuf; 555 556 case SO_RCVBUF: 557 /* Don't error on this BSD doesn't and if you think 558 about it this is right. Otherwise apps have to 559 play 'guess the biggest size' games. RCVBUF/SNDBUF 560 are treated in BSD as hints */ 561 562 if (val > sysctl_rmem_max) 563 val = sysctl_rmem_max; 564set_rcvbuf: 565 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 566 /* 567 * We double it on the way in to account for 568 * "struct sk_buff" etc. overhead. Applications 569 * assume that the SO_RCVBUF setting they make will 570 * allow that much actual data to be received on that 571 * socket. 572 * 573 * Applications are unaware that "struct sk_buff" and 574 * other overheads allocate from the receive buffer 575 * during socket buffer allocation. 
576 * 577 * And after considering the possible alternatives, 578 * returning the value we actually used in getsockopt 579 * is the most desirable behavior. 580 */ 581 if ((val * 2) < SOCK_MIN_RCVBUF) 582 sk->sk_rcvbuf = SOCK_MIN_RCVBUF; 583 else 584 sk->sk_rcvbuf = val * 2; 585 break; 586 587 case SO_RCVBUFFORCE: 588 if (!capable(CAP_NET_ADMIN)) { 589 ret = -EPERM; 590 break; 591 } 592 goto set_rcvbuf; 593 594 case SO_KEEPALIVE: 595#ifdef CONFIG_INET 596 if (sk->sk_protocol == IPPROTO_TCP) 597 tcp_set_keepalive(sk, valbool); 598#endif 599 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 600 break; 601 602 case SO_OOBINLINE: 603 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 604 break; 605 606 case SO_NO_CHECK: 607 sk->sk_no_check = valbool; 608 break; 609 610 case SO_PRIORITY: 611 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 612 sk->sk_priority = val; 613 else 614 ret = -EPERM; 615 break; 616 617 case SO_LINGER: 618 if (optlen < sizeof(ling)) { 619 ret = -EINVAL; /* 1003.1g */ 620 break; 621 } 622 if (copy_from_user(&ling, optval, sizeof(ling))) { 623 ret = -EFAULT; 624 break; 625 } 626 if (!ling.l_onoff) 627 sock_reset_flag(sk, SOCK_LINGER); 628 else { 629#if (BITS_PER_LONG == 32) 630 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 631 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 632 else 633#endif 634 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; 635 sock_set_flag(sk, SOCK_LINGER); 636 } 637 break; 638 639 case SO_BSDCOMPAT: 640 sock_warn_obsolete_bsdism("setsockopt"); 641 break; 642 643 case SO_PASSCRED: 644 if (valbool) 645 set_bit(SOCK_PASSCRED, &sock->flags); 646 else 647 clear_bit(SOCK_PASSCRED, &sock->flags); 648 break; 649 650 case SO_TIMESTAMP: 651 case SO_TIMESTAMPNS: 652 if (valbool) { 653 if (optname == SO_TIMESTAMP) 654 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 655 else 656 sock_set_flag(sk, SOCK_RCVTSTAMPNS); 657 sock_set_flag(sk, SOCK_RCVTSTAMP); 658 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 659 } else { 660 sock_reset_flag(sk, SOCK_RCVTSTAMP); 661 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 662 } 663 break; 664 665 case SO_TIMESTAMPING: 666 if (val & ~SOF_TIMESTAMPING_MASK) { 667 ret = -EINVAL; 668 break; 669 } 670 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE, 671 val & SOF_TIMESTAMPING_TX_HARDWARE); 672 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE, 673 val & SOF_TIMESTAMPING_TX_SOFTWARE); 674 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE, 675 val & SOF_TIMESTAMPING_RX_HARDWARE); 676 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 677 sock_enable_timestamp(sk, 678 SOCK_TIMESTAMPING_RX_SOFTWARE); 679 else 680 sock_disable_timestamp(sk, 681 SOCK_TIMESTAMPING_RX_SOFTWARE); 682 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, 683 val & SOF_TIMESTAMPING_SOFTWARE); 684 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, 685 val & SOF_TIMESTAMPING_SYS_HARDWARE); 686 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE, 687 val & SOF_TIMESTAMPING_RAW_HARDWARE); 688 break; 689 690 case SO_RCVLOWAT: 691 if (val < 0) 692 val = INT_MAX; 693 sk->sk_rcvlowat = val ? 
: 1; 694 break; 695 696 case SO_RCVTIMEO: 697 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); 698 break; 699 700 case SO_SNDTIMEO: 701 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); 702 break; 703 704 case SO_ATTACH_FILTER: 705 ret = -EINVAL; 706 if (optlen == sizeof(struct sock_fprog)) { 707 struct sock_fprog fprog; 708 709 ret = -EFAULT; 710 if (copy_from_user(&fprog, optval, sizeof(fprog))) 711 break; 712 713 ret = sk_attach_filter(&fprog, sk); 714 } 715 break; 716 717 case SO_DETACH_FILTER: 718 ret = sk_detach_filter(sk); 719 break; 720 721 case SO_PASSSEC: 722 if (valbool) 723 set_bit(SOCK_PASSSEC, &sock->flags); 724 else 725 clear_bit(SOCK_PASSSEC, &sock->flags); 726 break; 727 case SO_MARK: 728 if (!capable(CAP_NET_ADMIN)) 729 ret = -EPERM; 730 else 731 sk->sk_mark = val; 732 break; 733 734 /* We implement the SO_SNDLOWAT etc to 735 not be settable (1003.1g 5.3) */ 736 case SO_RXQ_OVFL: 737 if (valbool) 738 sock_set_flag(sk, SOCK_RXQ_OVFL); 739 else 740 sock_reset_flag(sk, SOCK_RXQ_OVFL); 741 break; 742 default: 743 ret = -ENOPROTOOPT; 744 break; 745 } 746 release_sock(sk); 747 return ret; 748} 749EXPORT_SYMBOL(sock_setsockopt); 750 751 752int sock_getsockopt(struct socket *sock, int level, int optname, 753 char __user *optval, int __user *optlen) 754{ 755 struct sock *sk = sock->sk; 756 757 union { 758 int val; 759 struct linger ling; 760 struct timeval tm; 761 } v; 762 763 int lv = sizeof(int); 764 int len; 765 766 if (get_user(len, optlen)) 767 return -EFAULT; 768 if (len < 0) 769 return -EINVAL; 770 771 memset(&v, 0, sizeof(v)); 772 773 switch (optname) { 774 case SO_DEBUG: 775 v.val = sock_flag(sk, SOCK_DBG); 776 break; 777 778 case SO_DONTROUTE: 779 v.val = sock_flag(sk, SOCK_LOCALROUTE); 780 break; 781 782 case SO_BROADCAST: 783 v.val = !!sock_flag(sk, SOCK_BROADCAST); 784 break; 785 786 case SO_SNDBUF: 787 v.val = sk->sk_sndbuf; 788 break; 789 790 case SO_RCVBUF: 791 v.val = sk->sk_rcvbuf; 792 break; 793 794 case SO_REUSEADDR: 795 v.val = sk->sk_reuse; 796 break; 797 798 case SO_KEEPALIVE: 799 v.val = !!sock_flag(sk, SOCK_KEEPOPEN); 800 break; 801 802 case SO_TYPE: 803 v.val = sk->sk_type; 804 break; 805 806 case SO_PROTOCOL: 807 v.val = sk->sk_protocol; 808 break; 809 810 case SO_DOMAIN: 811 v.val = sk->sk_family; 812 break; 813 814 case SO_ERROR: 815 v.val = -sock_error(sk); 816 if (v.val == 0) 817 v.val = xchg(&sk->sk_err_soft, 0); 818 break; 819 820 case SO_OOBINLINE: 821 v.val = !!sock_flag(sk, SOCK_URGINLINE); 822 break; 823 824 case SO_NO_CHECK: 825 v.val = sk->sk_no_check; 826 break; 827 828 case SO_PRIORITY: 829 v.val = sk->sk_priority; 830 break; 831 832 case SO_LINGER: 833 lv = sizeof(v.ling); 834 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); 835 v.ling.l_linger = sk->sk_lingertime / HZ; 836 break; 837 838 case SO_BSDCOMPAT: 839 sock_warn_obsolete_bsdism("getsockopt"); 840 break; 841 842 case SO_TIMESTAMP: 843 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && 844 !sock_flag(sk, SOCK_RCVTSTAMPNS); 845 break; 846 847 case SO_TIMESTAMPNS: 848 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); 849 break; 850 851 case SO_TIMESTAMPING: 852 v.val = 0; 853 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) 854 v.val |= SOF_TIMESTAMPING_TX_HARDWARE; 855 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) 856 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE; 857 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE)) 858 v.val |= SOF_TIMESTAMPING_RX_HARDWARE; 859 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) 860 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE; 861 if (sock_flag(sk, 
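
/*
 * Two illustrative notes, not from the original source:
 *
 * - The SO_RCVBUF/SO_SNDBUF doubling above is visible to applications:
 *   setting SO_RCVBUF to 65536 leaves sk->sk_rcvbuf at 131072, and a
 *   later getsockopt(SO_RCVBUF) reports 131072 as well.
 *
 * - sock_getsockopt() below copies out at most lv bytes (the size of
 *   the option's native type) even if the caller supplies a larger
 *   buffer, and writes the clamped length back through optlen; reading
 *   SO_RCVBUF with a 16-byte buffer still yields a 4-byte int.
 */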

int sock_getsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int __user *optlen)
{
    struct sock *sk = sock->sk;

    union {
        int val;
        struct linger ling;
        struct timeval tm;
    } v;

    int lv = sizeof(int);
    int len;

    if (get_user(len, optlen))
        return -EFAULT;
    if (len < 0)
        return -EINVAL;

    memset(&v, 0, sizeof(v));

    switch (optname) {
    case SO_DEBUG:
        v.val = sock_flag(sk, SOCK_DBG);
        break;

    case SO_DONTROUTE:
        v.val = sock_flag(sk, SOCK_LOCALROUTE);
        break;

    case SO_BROADCAST:
        v.val = !!sock_flag(sk, SOCK_BROADCAST);
        break;

    case SO_SNDBUF:
        v.val = sk->sk_sndbuf;
        break;

    case SO_RCVBUF:
        v.val = sk->sk_rcvbuf;
        break;

    case SO_REUSEADDR:
        v.val = sk->sk_reuse;
        break;

    case SO_KEEPALIVE:
        v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
        break;

    case SO_TYPE:
        v.val = sk->sk_type;
        break;

    case SO_PROTOCOL:
        v.val = sk->sk_protocol;
        break;

    case SO_DOMAIN:
        v.val = sk->sk_family;
        break;

    case SO_ERROR:
        v.val = -sock_error(sk);
        if (v.val == 0)
            v.val = xchg(&sk->sk_err_soft, 0);
        break;

    case SO_OOBINLINE:
        v.val = !!sock_flag(sk, SOCK_URGINLINE);
        break;

    case SO_NO_CHECK:
        v.val = sk->sk_no_check;
        break;

    case SO_PRIORITY:
        v.val = sk->sk_priority;
        break;

    case SO_LINGER:
        lv = sizeof(v.ling);
        v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
        v.ling.l_linger = sk->sk_lingertime / HZ;
        break;

    case SO_BSDCOMPAT:
        sock_warn_obsolete_bsdism("getsockopt");
        break;

    case SO_TIMESTAMP:
        v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
                !sock_flag(sk, SOCK_RCVTSTAMPNS);
        break;

    case SO_TIMESTAMPNS:
        v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
        break;

    case SO_TIMESTAMPING:
        v.val = 0;
        if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
            v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
            v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
            v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
            v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
        break;

    case SO_RCVTIMEO:
        lv = sizeof(struct timeval);
        if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = 0;
            v.tm.tv_usec = 0;
        } else {
            v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
        }
        break;

    case SO_SNDTIMEO:
        lv = sizeof(struct timeval);
        if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = 0;
            v.tm.tv_usec = 0;
        } else {
            v.tm.tv_sec = sk->sk_sndtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
        }
        break;

    case SO_RCVLOWAT:
        v.val = sk->sk_rcvlowat;
        break;

    case SO_SNDLOWAT:
        v.val = 1;
        break;

    case SO_PASSCRED:
        v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
        break;

    case SO_PEERCRED:
        if (len > sizeof(sk->sk_peercred))
            len = sizeof(sk->sk_peercred);
        if (copy_to_user(optval, &sk->sk_peercred, len))
            return -EFAULT;
        goto lenout;

    case SO_PEERNAME:
    {
        char address[128];

        if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
            return -ENOTCONN;
        if (lv < len)
            return -EINVAL;
        if (copy_to_user(optval, address, len))
            return -EFAULT;
        goto lenout;
    }

    /* Dubious BSD thing... Probably nobody even uses it, but
     * the UNIX standard wants it for whatever reason... -DaveM
     */
    case SO_ACCEPTCONN:
        v.val = sk->sk_state == TCP_LISTEN;
        break;

    case SO_PASSSEC:
        v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
        break;

    case SO_PEERSEC:
        return security_socket_getpeersec_stream(sock, optval, optlen, len);

    case SO_MARK:
        v.val = sk->sk_mark;
        break;

    case SO_RXQ_OVFL:
        v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
        break;

    default:
        return -ENOPROTOOPT;
    }

    if (len > lv)
        len = lv;
    if (copy_to_user(optval, &v, len))
        return -EFAULT;
lenout:
    if (put_user(len, optlen))
        return -EFAULT;
    return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
    sock_lock_init_class_and_name(sk,
            af_family_slock_key_strings[sk->sk_family],
            af_family_slock_keys + sk->sk_family,
            af_family_key_strings[sk->sk_family],
            af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
    void *sptr = nsk->sk_security;
#endif
    BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
                 sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
                 sizeof(osk->sk_tx_queue_mapping));
    memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
           osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
    nsk->sk_security = sptr;
    security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                                  int family)
{
    struct sock *sk;
    struct kmem_cache *slab;

    slab = prot->slab;
    if (slab != NULL) {
        sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
        if (!sk)
            return sk;
        if (priority & __GFP_ZERO) {
            /*
             * Caches using SLAB_DESTROY_BY_RCU should leave
             * sk_node.next unmodified, so special care is
             * taken when zeroing the object.
             */
            if (offsetof(struct sock, sk_node.next) != 0)
                memset(sk, 0, offsetof(struct sock, sk_node.next));
            memset(&sk->sk_node.pprev, 0,
                   prot->obj_size - offsetof(struct sock,
                                             sk_node.pprev));
        }
    } else
        sk = kmalloc(prot->obj_size, priority);

    if (sk != NULL) {
        kmemcheck_annotate_bitfield(sk, flags);

        if (security_sk_alloc(sk, family, priority))
            goto out_free;

        if (!try_module_get(prot->owner))
            goto out_free_sec;
        sk_tx_queue_clear(sk);
    }

    return sk;

out_free_sec:
    security_sk_free(sk);
out_free:
    if (slab != NULL)
        kmem_cache_free(slab, sk);
    else
        kfree(sk);
    return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
    struct kmem_cache *slab;
    struct module *owner;

    owner = prot->owner;
    slab = prot->slab;

    security_sk_free(sk);
    if (slab != NULL)
        kmem_cache_free(slab, sk);
    else
        kfree(sk);
    module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
    u32 classid = task_cls_classid(current);

    if (classid && classid != sk->sk_classid)
        sk->sk_classid = classid;
}
#endif

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                      struct proto *prot)
{
    struct sock *sk;

    sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
    if (sk) {
        sk->sk_family = family;
        /*
         * See comment in struct sock definition to understand
         * why we need sk_prot_creator -acme
         */
        sk->sk_prot = sk->sk_prot_creator = prot;
        sock_lock_init(sk);
        sock_net_set(sk, get_net(net));
        atomic_set(&sk->sk_wmem_alloc, 1);

        sock_update_classid(sk);
    }

    return sk;
}
EXPORT_SYMBOL(sk_alloc);
static void __sk_free(struct sock *sk)
{
    struct sk_filter *filter;

    if (sk->sk_destruct)
        sk->sk_destruct(sk);

    filter = rcu_dereference_check(sk->sk_filter,
                                   atomic_read(&sk->sk_wmem_alloc) == 0);
    if (filter) {
        sk_filter_uncharge(sk, filter);
        rcu_assign_pointer(sk->sk_filter, NULL);
    }

    sock_disable_timestamp(sk, SOCK_TIMESTAMP);
    sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

    if (atomic_read(&sk->sk_omem_alloc))
        printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
               __func__, atomic_read(&sk->sk_omem_alloc));

    put_net(sock_net(sk));
    sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
    /*
     * We subtract one from sk_wmem_alloc and can tell whether
     * some packets are still in some tx queue.
     * If not zero, sock_wfree() will call __sk_free(sk) later.
     */
    if (atomic_dec_and_test(&sk->sk_wmem_alloc))
        __sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
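
/*
 * Lifetime sketch (illustrative, not from the original source):
 * sk_alloc() starts sk_wmem_alloc at 1, so an idle socket holds exactly
 * one reference on itself.  sk_free() drops that reference; if
 * transmitted skbs still hold references, the final sock_wfree() sees
 * sk_wmem_alloc reach zero and performs the real __sk_free() instead.
 */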

/*
 * The last sock_put() should drop a reference to sk->sk_net. It has
 * already been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to a socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
    if (sk == NULL || sk->sk_socket == NULL)
        return;

    sock_hold(sk);
    sock_release(sk->sk_socket);
    release_net(sock_net(sk));
    sock_net_set(sk, get_net(&init_net));
    sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
    struct sock *newsk;

    newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
    if (newsk != NULL) {
        struct sk_filter *filter;

        sock_copy(newsk, sk);

        /* SANITY */
        get_net(sock_net(newsk));
        sk_node_init(&newsk->sk_node);
        sock_lock_init(newsk);
        bh_lock_sock(newsk);
        newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
        newsk->sk_backlog.len = 0;

        atomic_set(&newsk->sk_rmem_alloc, 0);
        /*
         * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
         */
        atomic_set(&newsk->sk_wmem_alloc, 1);
        atomic_set(&newsk->sk_omem_alloc, 0);
        skb_queue_head_init(&newsk->sk_receive_queue);
        skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
        skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

        spin_lock_init(&newsk->sk_dst_lock);
        rwlock_init(&newsk->sk_callback_lock);
        lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                   af_callback_keys + newsk->sk_family,
                                   af_family_clock_key_strings[newsk->sk_family]);

        newsk->sk_dst_cache = NULL;
        newsk->sk_wmem_queued = 0;
        newsk->sk_forward_alloc = 0;
        newsk->sk_send_head = NULL;
        newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

        sock_reset_flag(newsk, SOCK_DONE);
        skb_queue_head_init(&newsk->sk_error_queue);

        filter = newsk->sk_filter;
        if (filter != NULL)
            sk_filter_charge(newsk, filter);

        if (unlikely(xfrm_sk_clone_policy(newsk))) {
            /* It is still raw copy of parent, so invalidate
             * destructor and make plain sk_free() */
            newsk->sk_destruct = NULL;
            sk_free(newsk);
            newsk = NULL;
            goto out;
        }

        newsk->sk_err = 0;
        newsk->sk_priority = 0;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&newsk->sk_refcnt, 2);

        /*
         * Increment the counter in the same struct proto as the master
         * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
         * is the same as sk->sk_prot->socks, as this field was copied
         * with memcpy).
         *
         * This _changes_ the previous behaviour, where
         * tcp_create_openreq_child always was incrementing the
         * equivalent to tcp_prot->socks (inet_sock_nr), so this has
         * to be taken into account in all callers. -acme
         */
        sk_refcnt_debug_inc(newsk);
        sk_set_socket(newsk, NULL);
        newsk->sk_wq = NULL;

        if (newsk->sk_prot->sockets_allocated)
            percpu_counter_inc(newsk->sk_prot->sockets_allocated);

        if (sock_flag(newsk, SOCK_TIMESTAMP) ||
            sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
            net_enable_timestamp();
    }
out:
    return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
    __sk_dst_set(sk, dst);
    sk->sk_route_caps = dst->dev->features;
    if (sk->sk_route_caps & NETIF_F_GSO)
        sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
    sk->sk_route_caps &= ~sk->sk_route_nocaps;
    if (sk_can_gso(sk)) {
        if (dst->header_len) {
            sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        } else {
            sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
            sk->sk_gso_max_size = dst->dev->gso_max_size;
        }
    }
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
    if (totalram_pages <= 4096) {
        sysctl_wmem_max = 32767;
        sysctl_rmem_max = 32767;
        sysctl_wmem_default = 32767;
        sysctl_rmem_default = 32767;
    } else if (totalram_pages >= 131072) {
        sysctl_wmem_max = 131071;
        sysctl_rmem_max = 131071;
    }
}

/*
 * Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
    struct sock *sk = skb->sk;
    unsigned int len = skb->truesize;

    if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
        /*
         * Keep a reference on sk_wmem_alloc, this will be released
         * after sk_write_space() call
         */
        atomic_sub(len - 1, &sk->sk_wmem_alloc);
        sk->sk_write_space(sk);
        len = 1;
    }
    /*
     * If sk_wmem_alloc reaches 0, we must finish what sk_free()
     * could not do because of in-flight packets
     */
    if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
        __sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
    struct sock *sk = skb->sk;

    atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
    sk_mem_uncharge(skb->sk, skb->truesize);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
    int uid;

    read_lock(&sk->sk_callback_lock);
    uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
    read_unlock(&sk->sk_callback_lock);
    return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
    unsigned long ino;

    read_lock(&sk->sk_callback_lock);
    ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
    read_unlock(&sk->sk_callback_lock);
    return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
    if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
        struct sk_buff *skb = alloc_skb(size, priority);
        if (skb) {
            skb_set_owner_w(skb, sk);
            return skb;
        }
    }
    return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
    if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
        struct sk_buff *skb = alloc_skb(size, priority);
        if (skb) {
            skb_set_owner_r(skb, sk);
            return skb;
        }
    }
    return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
    if ((unsigned)size <= sysctl_optmem_max &&
        atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
        void *mem;
        /* First do the add, to avoid the race if kmalloc
         * might sleep.
         */
        atomic_add(size, &sk->sk_omem_alloc);
        mem = kmalloc(size, priority);
        if (mem)
            return mem;
        atomic_sub(size, &sk->sk_omem_alloc);
    }
    return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
    kfree(mem);
    atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
    DEFINE_WAIT(wait);

    clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
    for (;;) {
        if (!timeo)
            break;
        if (signal_pending(current))
            break;
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
            break;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
            break;
        if (sk->sk_err)
            break;
        timeo = schedule_timeout(timeo);
    }
    finish_wait(sk_sleep(sk), &wait);
    return timeo;
}


/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode)
{
    struct sk_buff *skb;
    gfp_t gfp_mask;
    long timeo;
    int err;

    gfp_mask = sk->sk_allocation;
    if (gfp_mask & __GFP_WAIT)
        gfp_mask |= __GFP_REPEAT;

    timeo = sock_sndtimeo(sk, noblock);
    while (1) {
        err = sock_error(sk);
        if (err != 0)
            goto failure;

        err = -EPIPE;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
            goto failure;

        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
            skb = alloc_skb(header_len, gfp_mask);
            if (skb) {
                int npages;
                int i;

                /* No pages, we're done... */
                if (!data_len)
                    break;
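                /*
                 * Illustrative sizing, not in the original source:
                 * with 4 KiB pages, data_len == 10000 gives
                 * npages == 3, and the frags below are filled with
                 * 4096, 4096 and 1808 bytes respectively.
                 */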
                npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                skb->truesize += data_len;
                skb_shinfo(skb)->nr_frags = npages;
                for (i = 0; i < npages; i++) {
                    struct page *page;
                    skb_frag_t *frag;

                    page = alloc_pages(sk->sk_allocation, 0);
                    if (!page) {
                        err = -ENOBUFS;
                        skb_shinfo(skb)->nr_frags = i;
                        kfree_skb(skb);
                        goto failure;
                    }

                    frag = &skb_shinfo(skb)->frags[i];
                    frag->page = page;
                    frag->page_offset = 0;
                    frag->size = (data_len >= PAGE_SIZE ?
                                  PAGE_SIZE :
                                  data_len);
                    data_len -= PAGE_SIZE;
                }

                /* Full success... */
                break;
            }
            err = -ENOBUFS;
            goto failure;
        }
        set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        if (!timeo)
            goto failure;
        if (signal_pending(current))
            goto interrupted;
        timeo = sock_wait_for_wmem(sk, timeo);
    }

    skb_set_owner_w(skb, sk);
    return skb;

interrupted:
    err = sock_intr_errno(timeo);
failure:
    *errcode = err;
    return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
{
    return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
{
    DEFINE_WAIT(wait);

    for (;;) {
        prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                  TASK_UNINTERRUPTIBLE);
        spin_unlock_bh(&sk->sk_lock.slock);
        schedule();
        spin_lock_bh(&sk->sk_lock.slock);
        if (!sock_owned_by_user(sk))
            break;
    }
    finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
    struct sk_buff *skb = sk->sk_backlog.head;

    do {
        sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
        bh_unlock_sock(sk);

        do {
            struct sk_buff *next = skb->next;

            WARN_ON_ONCE(skb_dst_is_noref(skb));
            skb->next = NULL;
            sk_backlog_rcv(sk, skb);

            /*
             * We are in process context here with softirqs
             * disabled, use cond_resched_softirq() to preempt.
             * This is safe to do because we've taken the backlog
             * queue private:
             */
            cond_resched_softirq();

            skb = next;
        } while (skb != NULL);

        bh_lock_sock(sk);
    } while ((skb = sk->sk_backlog.head) != NULL);

    /*
     * Doing the zeroing here guarantees we cannot loop forever
     * while a wild producer attempts to flood us.
     */
    sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
    int rc;
    DEFINE_WAIT(wait);

    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
    clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    finish_wait(sk_sleep(sk), &wait);
    return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk:   socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
    struct proto *prot = sk->sk_prot;
    int amt = sk_mem_pages(size);
    int allocated;

    sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
    allocated = atomic_add_return(amt, prot->memory_allocated);

    /* Under limit. */
    if (allocated <= prot->sysctl_mem[0]) {
        if (prot->memory_pressure && *prot->memory_pressure)
            *prot->memory_pressure = 0;
        return 1;
    }

    /* Under pressure. */
    if (allocated > prot->sysctl_mem[1])
        if (prot->enter_memory_pressure)
            prot->enter_memory_pressure(sk);

    /* Over hard limit. */
    if (allocated > prot->sysctl_mem[2])
        goto suppress_allocation;

    /* Guarantee minimum buffer size under pressure. */
    if (kind == SK_MEM_RECV) {
        if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
            return 1;
    } else { /* SK_MEM_SEND */
        if (sk->sk_type == SOCK_STREAM) {
            if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
                return 1;
        } else if (atomic_read(&sk->sk_wmem_alloc) <
                   prot->sysctl_wmem[0])
            return 1;
    }

    if (prot->memory_pressure) {
        int alloc;

        if (!*prot->memory_pressure)
            return 1;
        alloc = percpu_counter_read_positive(prot->sockets_allocated);
        if (prot->sysctl_mem[2] > alloc *
            sk_mem_pages(sk->sk_wmem_queued +
                         atomic_read(&sk->sk_rmem_alloc) +
                         sk->sk_forward_alloc))
            return 1;
    }

suppress_allocation:

    if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
        sk_stream_moderate_sndbuf(sk);

        /* Fail only if socket is _under_ its sndbuf.
         * In this case we cannot block, so that we have to fail.
         */
        if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
            return 1;
    }

    /* Alas. Undo changes. */
    sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
    atomic_sub(amt, prot->memory_allocated);
    return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
    struct proto *prot = sk->sk_prot;

    atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
               prot->memory_allocated);
    sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

    if (prot->memory_pressure && *prot->memory_pressure &&
        (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
        *prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);
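
/*
 * Accounting example (illustrative, assuming SK_MEM_QUANTUM equals
 * PAGE_SIZE == 4096): a request of size 3000 gives sk_mem_pages() == 1,
 * so sk_forward_alloc grows by 4096 and memory_allocated by one quantum.
 * The 1096 unused bytes stay in sk_forward_alloc for later requests;
 * __sk_mem_reclaim() above returns only whole quanta to the global
 * counter and keeps the sub-quantum remainder on the socket.
 */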


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
    return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
    /* Mirror missing mmap method error code */
    return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
    ssize_t res;
    struct msghdr msg = {.msg_flags = flags};
    struct kvec iov;
    char *kaddr = kmap(page);
    iov.iov_base = kaddr + offset;
    iov.iov_len = size;
    res = kernel_sendmsg(sock, &msg, &iov, 1, size);
    kunmap(page);
    return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
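
/*
 * Illustrative wiring for a hypothetical protocol (AF_FOO is made up):
 * a datagram-only family that cannot listen or accept would plug these
 * stubs into its proto_ops so unsupported calls fail cleanly with
 * -EOPNOTSUPP (or -ENODEV for mmap):
 *
 *     static const struct proto_ops foo_dgram_ops = {
 *         .family   = AF_FOO,
 *         .listen   = sock_no_listen,
 *         .accept   = sock_no_accept,
 *         .shutdown = sock_no_shutdown,
 *         .mmap     = sock_no_mmap,
 *     };
 *
 * sock_no_sendpage() is the one real fallback here: it implements
 * sendpage in terms of kernel_sendmsg() rather than returning an error.
 */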

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_all(&wq->wait);
    rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_poll(&wq->wait, POLLERR);
    sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
    rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                        POLLRDNORM | POLLRDBAND);
    sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
    rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();

    /* Do not wake up a writer until he can make "significant"
     * progress.  --DaveM
     */
    if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
            wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                            POLLWRNORM | POLLWRBAND);

        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
            sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
    }

    rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
    kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
    if (sk->sk_socket && sk->sk_socket->file)
        if (send_sigurg(&sk->sk_socket->file->f_owner))
            sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
    if (!mod_timer(timer, expires))
        sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
    if (timer_pending(timer) && del_timer(timer))
        __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
    skb_queue_head_init(&sk->sk_receive_queue);
    skb_queue_head_init(&sk->sk_write_queue);
    skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
    skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

    sk->sk_send_head = NULL;

    init_timer(&sk->sk_timer);

    sk->sk_allocation = GFP_KERNEL;
    sk->sk_rcvbuf = sysctl_rmem_default;
    sk->sk_sndbuf = sysctl_wmem_default;
    sk->sk_state = TCP_CLOSE;
    sk_set_socket(sk, sock);

    sock_set_flag(sk, SOCK_ZAPPED);

    if (sock) {
        sk->sk_type = sock->type;
        sk->sk_wq = sock->wq;
        sock->sk = sk;
    } else
        sk->sk_wq = NULL;

    spin_lock_init(&sk->sk_dst_lock);
    rwlock_init(&sk->sk_callback_lock);
    lockdep_set_class_and_name(&sk->sk_callback_lock,
                               af_callback_keys + sk->sk_family,
                               af_family_clock_key_strings[sk->sk_family]);

    sk->sk_state_change = sock_def_wakeup;
    sk->sk_data_ready   = sock_def_readable;
    sk->sk_write_space  = sock_def_write_space;
    sk->sk_error_report = sock_def_error_report;
    sk->sk_destruct     = sock_def_destruct;

    sk->sk_sndmsg_page = NULL;
    sk->sk_sndmsg_off = 0;

    sk->sk_peercred.pid = 0;
    sk->sk_peercred.uid = -1;
    sk->sk_peercred.gid = -1;
    sk->sk_write_pending = 0;
    sk->sk_rcvlowat = 1;
    sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
    sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

    sk->sk_stamp = ktime_set(-1L, 0);

    /*
     * Before updating sk_refcnt, we must commit prior changes to memory
     * (Documentation/RCU/rculist_nulls.txt for details)
     */
    smp_wmb();
    atomic_set(&sk->sk_refcnt, 1);
    atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
    might_sleep();
    spin_lock_bh(&sk->sk_lock.slock);
    if (sk->sk_lock.owned)
        __lock_sock(sk);
    sk->sk_lock.owned = 1;
    spin_unlock(&sk->sk_lock.slock);
    /*
     * The sk_lock has mutex_lock() semantics here:
     */
    mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
    local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
    /*
     * The sk_lock has mutex_unlock() semantics:
     */
    mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

    spin_lock_bh(&sk->sk_lock.slock);
    if (sk->sk_backlog.tail)
        __release_sock(sk);
    sk->sk_lock.owned = 0;
    if (waitqueue_active(&sk->sk_lock.wq))
        wake_up(&sk->sk_lock.wq);
    spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
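
/*
 * Usage pattern (illustrative, not from the original source): process
 * context brackets socket-state changes with
 *
 *     lock_sock(sk);
 *     ... modify sk ...
 *     release_sock(sk);
 *
 * while softirq receive paths that find the lock owned queue packets on
 * sk->sk_backlog; release_sock() drains that backlog through
 * __release_sock() before waking other waiters.
 */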

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means you must specify SO_ERROR (otherwise, what's the point of it?).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
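
/*
 * Wiring sketch (assumed, not from this file): an address family
 * typically plugs the sock_common_*() helpers straight into its
 * struct proto_ops so that the generic socket layer forwards calls
 * to sk->sk_prot.  Only the relevant fields are shown; my_stream_ops
 * and the omitted handlers are hypothetical.
 */
static const struct proto_ops my_stream_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,	/* -> sk_prot->setsockopt */
	.getsockopt	= sock_common_getsockopt,	/* -> sk_prot->getsockopt */
	.recvmsg	= sock_common_recvmsg,		/* -> sk_prot->recvmsg */
	/* .release, .bind, .connect, ... omitted in this sketch */
};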

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes no
	 * longer have access to the socket, but the network stack still
	 * does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another
	 * CPU is running the receive path and did its hash-table lookup
	 * before we unhashed the socket.  Those packets will reach the
	 * receive queue and be purged by the socket destructor.
	 *
	 * We also still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues.  The
	 * destructor will drain the receive queue, but transmitted
	 * packets will delay socket destruction until the last reference
	 * is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	int cpu = smp_processor_id();
	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
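
/*
 * Accounting sketch (illustrative only, not from this file): a
 * protocol's hash/unhash callbacks keep the per-cpu counter in step
 * with its lookup table so sock_prot_inuse_get() can report socket
 * counts.  sock_prot_inuse_add() must run with preemption disabled
 * (it uses smp_processor_id()); here that is guaranteed by
 * spin_lock_bh() on a hypothetical my_hash_lock.
 */
static DEFINE_SPINLOCK(my_hash_lock);

static void my_proto_hash(struct sock *sk)
{
	spin_lock_bh(&my_hash_lock);
	/* ... insert sk into the protocol's lookup structure ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock_bh(&my_hash_lock);
}

static void my_proto_unhash(struct sock *sk)
{
	spin_lock_bh(&my_hash_lock);
	/* ... remove sk from the lookup structure ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(&my_hash_lock);
}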

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
						prot->rsk_prot->obj_size, 0,
						SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
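
/*
 * Registration sketch (illustrative only, not from this file): a
 * protocol module sized around a private sock extension.  struct
 * my_sock, my_proto and the init/exit pair are hypothetical; passing
 * alloc_slab=1 makes proto_register() create the "MYPROTO" kmem
 * cache from obj_size, and proto_unregister() tears it down again.
 */
struct my_sock {
	struct sock	sk;		/* must come first */
	int		my_private_state;
};

static struct proto my_proto = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct my_sock),
};

static int __init my_proto_module_init(void)
{
	return proto_register(&my_proto, 1);
}

static void __exit my_proto_module_exit(void)
{
	proto_unregister(&my_proto);
}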

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= proto_seq_next,
	.stop	= proto_seq_stop,
	.show	= proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */