sock.c revision 8d987e5c75107ca7515fa19e857cfa24aab6ec8f
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *	Alan Cox	:	Numerous verify_area() problems
 *	Alan Cox	:	Connecting on a connecting socket
 *				now returns an error for tcp.
 *	Alan Cox	:	sock->protocol is set correctly, and is not
 *				sometimes left as 0.
 *	Alan Cox	:	connect handles icmp errors on a connect
 *				properly. Unfortunately there is a restart
 *				syscall nasty there. I can't match BSD
 *				without hacking the C library. Ideas urgently
 *				sought!
 *	Alan Cox	:	Disallow bind() to addresses that are not
 *				ours - especially broadcast ones!!
 *	Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *	Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *				instead they leave that for the DESTROY timer.
 *	Alan Cox	:	Clean up error flag in accept
 *	Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *				was buggy. Put a remove_sock() in the handler
 *				for memory when we hit 0. Also altered the
 *				timer code. The ACK stuff can wait and needs
 *				major TCP layer surgery.
 *	Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *				and fixed timer/inet_bh race.
 *	Alan Cox	:	Added zapped flag for TCP
 *	Alan Cox	:	Move kfree_skb into skbuff.c and tidied up
 *				surplus code
 *	Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc
 *				now call alloc_skb
 *	Alan Cox	:	kfree_s calls now are kfree_skbmem so we can
 *				track skb resources
 *	Alan Cox	:	Supports socket option broadcast now as does
 *				udp. Packet and raw need fixing.
 *	Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly
 *				occurred to me how easy it was so...
 *	Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *	C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink :	identd support
 *	Alan Cox	:	Fixed connect() taking signals I think.
 *	Alan Cox	:	SO_LINGER supported
 *	Alan Cox	:	Error reporting fixes
 *	Anonymous	:	inet_create tidied up (sk->reuse setting)
 *	Alan Cox	:	inet sockets don't set sk->type!
 *	Alan Cox	:	Split socket option code
 *	Alan Cox	:	Callbacks
 *	Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *	Alex		:	Removed restriction on inet fioctl
 *	Alan Cox	:	Splitting INET from NET core
 *	Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *	Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *	Alan Cox	:	Split IP from generic code
 *	Alan Cox	:	New kfree_skbmem()
 *	Alan Cox	:	Make SO_DEBUG superuser only.
 *	Alan Cox	:	Allow anyone to clear SO_DEBUG
 *				(compatibility fix)
 *	Alan Cox	:	Added optimistic memory grabbing for AF_UNIX
 *				throughput.
 *	Alan Cox	:	Allocator for a socket is settable.
 *	Alan Cox	:	SO_ERROR includes soft errors.
 *	Alan Cox	:	Allow NULL arguments on some SO_ opts
 *	Alan Cox	:	Generic socket allocation to make hooks
 *				easier (suggested by Craig Metz).
 *	Michael Pall	:	SO_ERROR returns positive errno again
 *	Steve Whitehouse:	Added default destructor to free
 *				protocol private data.
 *	Steve Whitehouse:	Added various other default routines
 *				common to several socket families.
 *	Chris Evans	:	Call suser() check last on F_SETOWN
 *	Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *	Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *	Andi Kleen	:	Fix write_space callback
 *	Chris Evans	:	Security fixes - signedness again
 *	Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
	"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" ,
	"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" ,
	"clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
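/*
 * Illustrative arithmetic, assuming a hypothetical build where
 * sizeof(struct sk_buff) is 232 bytes:
 *
 *	_SK_MEM_OVERHEAD = 232 + 256 = 488 bytes per queued packet
 *	SK_WMEM_MAX = SK_RMEM_MAX = 256 * 488 = 124928 bytes (~122 KB)
 *
 * The real figure varies with the platform, which is exactly why the
 * defaults above are derived from sizeof(struct sk_buff) instead of
 * being hard-coded.
 */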
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
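/*
 * A minimal usage sketch (hypothetical, not part of this file): a
 * protocol's receive handler typically hands a fully built skb to
 * sock_queue_rcv_skb() and drops it itself on failure. "example_rcv"
 * is an assumed name.
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */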
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
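/*
 * A minimal sketch of how a transmit path might use sk_dst_check()
 * (hypothetical caller, not from this file): the cached route is
 * validated and, if it has gone stale, re-resolved before sending.
 * sk_dst_check() returns a referenced dst, so the caller must
 * dst_release() it when done.
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		...re-run routing, then sk_setup_caps(sk, new_dst)...
 *	}
 */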
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 * Wake up sending tasks if we upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
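/*
 * The function above never copies more than min(len, lv) bytes and then
 * writes the length actually used back through optlen. A hypothetical
 * userspace caller (not part of this file) would therefore do:
 *
 *	int val;
 *	socklen_t len = sizeof(val);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) == 0)
 *		...val now holds the doubled size the kernel really uses...
 */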
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left
 * as-is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified. Special care is taken
			 * when initializing the object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	}
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc so we can tell whether
	 * some packets are still in some tx queue.
	 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
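/*
 * A minimal lifecycle sketch (hypothetical, not from this file): a
 * protocol creates a sock with sk_alloc(), initializes it, and drops
 * its creation reference with sk_free(). Thanks to the sk_wmem_alloc
 * == 1 trick above, the sock only truly dies once the last in-flight
 * tx buffer is freed by sock_wfree(). "example_proto" is an assumed
 * struct proto.
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	...
 *	sk_free(sk);
 */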
/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc	= 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);
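/*
 * A minimal sketch of a typical caller (hypothetical, not from this
 * file): a protocol building a packet within the socket's send-buffer
 * budget; passing force=1 would bypass the sk_sndbuf check.
 *
 *	skb = sock_wmalloc(sk, len + hlen, 0, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, hlen);
 *	...fill in len bytes of payload, then hand off to the device...
 */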
/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}
				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
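/*
 * A minimal sketch of how a protocol's recvmsg path might drive
 * sk_wait_data() (hypothetical code, not from this file):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		if (signal_pending(current)) {
 *			err = sock_intr_errno(timeo);
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 */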
/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_long_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_long_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
			prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */
int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
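/*
 * A minimal sketch of how a protocol might wire the stubs above into
 * its struct proto_ops (hypothetical "example_proto_ops", not from this
 * file); any operation the protocol does not implement points at the
 * matching sock_no_* default:
 *
 *	static const struct proto_ops example_proto_ops = {
 *		.family		= PF_INET,
 *		.owner		= THIS_MODULE,
 *		.bind		= sock_no_bind,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *		...remaining handlers filled with real implementations...
 *	};
 */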
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peer_pid		=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
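/*
 * A minimal sketch of the locking pattern the functions below implement
 * (hypothetical caller, not from this file): while the lock is owned,
 * softirq input is diverted to the socket backlog, which release_sock()
 * then replays.
 *
 *	lock_sock(sk);
 *	...modify socket state; incoming packets queue on sk_backlog...
 *	release_sock(sk);
 */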
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
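/*
 * A minimal sketch of the intended lock_sock_fast() usage (hypothetical
 * caller, not from this file); unlock_sock_fast() from
 * include/net/sock.h is assumed to undo whichever path was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	...short, non-blocking critical section...
 *	unlock_sock_fast(sk, slow);
 */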
2127 */ 2128int sock_common_getsockopt(struct socket *sock, int level, int optname, 2129 char __user *optval, int __user *optlen) 2130{ 2131 struct sock *sk = sock->sk; 2132 2133 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2134} 2135EXPORT_SYMBOL(sock_common_getsockopt); 2136 2137#ifdef CONFIG_COMPAT 2138int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, 2139 char __user *optval, int __user *optlen) 2140{ 2141 struct sock *sk = sock->sk; 2142 2143 if (sk->sk_prot->compat_getsockopt != NULL) 2144 return sk->sk_prot->compat_getsockopt(sk, level, optname, 2145 optval, optlen); 2146 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2147} 2148EXPORT_SYMBOL(compat_sock_common_getsockopt); 2149#endif 2150 2151int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 2152 struct msghdr *msg, size_t size, int flags) 2153{ 2154 struct sock *sk = sock->sk; 2155 int addr_len = 0; 2156 int err; 2157 2158 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, 2159 flags & ~MSG_DONTWAIT, &addr_len); 2160 if (err >= 0) 2161 msg->msg_namelen = addr_len; 2162 return err; 2163} 2164EXPORT_SYMBOL(sock_common_recvmsg); 2165 2166/* 2167 * Set socket options on an inet socket. 2168 */ 2169int sock_common_setsockopt(struct socket *sock, int level, int optname, 2170 char __user *optval, unsigned int optlen) 2171{ 2172 struct sock *sk = sock->sk; 2173 2174 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2175} 2176EXPORT_SYMBOL(sock_common_setsockopt); 2177 2178#ifdef CONFIG_COMPAT 2179int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, 2180 char __user *optval, unsigned int optlen) 2181{ 2182 struct sock *sk = sock->sk; 2183 2184 if (sk->sk_prot->compat_setsockopt != NULL) 2185 return sk->sk_prot->compat_setsockopt(sk, level, optname, 2186 optval, optlen); 2187 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2188} 2189EXPORT_SYMBOL(compat_sock_common_setsockopt); 2190#endif 2191 2192void sk_common_release(struct sock *sk) 2193{ 2194 if (sk->sk_prot->destroy) 2195 sk->sk_prot->destroy(sk); 2196 2197 /* 2198 * Observation: when sock_common_release is called, processes have 2199 * no access to socket. But net still has. 2200 * Step one, detach it from networking: 2201 * 2202 * A. Remove from hash tables. 2203 */ 2204 2205 sk->sk_prot->unhash(sk); 2206 2207 /* 2208 * In this point socket cannot receive new packets, but it is possible 2209 * that some packets are in flight because some CPU runs receiver and 2210 * did hash table lookup before we unhashed socket. They will achieve 2211 * receive queue and will be purged by socket destructor. 2212 * 2213 * Also we still have packets pending on receive queue and probably, 2214 * our own packets waiting in device queues. sock_destroy will drain 2215 * receive queue, but transmitted packets will delay socket destruction 2216 * until the last reference will be released. 
2217 */ 2218 2219 sock_orphan(sk); 2220 2221 xfrm_sk_free_policy(sk); 2222 2223 sk_refcnt_debug_release(sk); 2224 sock_put(sk); 2225} 2226EXPORT_SYMBOL(sk_common_release); 2227 2228static DEFINE_RWLOCK(proto_list_lock); 2229static LIST_HEAD(proto_list); 2230 2231#ifdef CONFIG_PROC_FS 2232#define PROTO_INUSE_NR 64 /* should be enough for the first time */ 2233struct prot_inuse { 2234 int val[PROTO_INUSE_NR]; 2235}; 2236 2237static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 2238 2239#ifdef CONFIG_NET_NS 2240void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2241{ 2242 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); 2243} 2244EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2245 2246int sock_prot_inuse_get(struct net *net, struct proto *prot) 2247{ 2248 int cpu, idx = prot->inuse_idx; 2249 int res = 0; 2250 2251 for_each_possible_cpu(cpu) 2252 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; 2253 2254 return res >= 0 ? res : 0; 2255} 2256EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2257 2258static int __net_init sock_inuse_init_net(struct net *net) 2259{ 2260 net->core.inuse = alloc_percpu(struct prot_inuse); 2261 return net->core.inuse ? 0 : -ENOMEM; 2262} 2263 2264static void __net_exit sock_inuse_exit_net(struct net *net) 2265{ 2266 free_percpu(net->core.inuse); 2267} 2268 2269static struct pernet_operations net_inuse_ops = { 2270 .init = sock_inuse_init_net, 2271 .exit = sock_inuse_exit_net, 2272}; 2273 2274static __init int net_inuse_init(void) 2275{ 2276 if (register_pernet_subsys(&net_inuse_ops)) 2277 panic("Cannot initialize net inuse counters"); 2278 2279 return 0; 2280} 2281 2282core_initcall(net_inuse_init); 2283#else 2284static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); 2285 2286void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2287{ 2288 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); 2289} 2290EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2291 2292int sock_prot_inuse_get(struct net *net, struct proto *prot) 2293{ 2294 int cpu, idx = prot->inuse_idx; 2295 int res = 0; 2296 2297 for_each_possible_cpu(cpu) 2298 res += per_cpu(prot_inuse, cpu).val[idx]; 2299 2300 return res >= 0 ? 
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif
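
/*
 * Editor's note: hypothetical sketch of the usual sock_prot_inuse_add()
 * call sites: a protocol's hash/unhash operations bump the per-cpu
 * in-use count by +1/-1 (compare the real hash and unhash
 * implementations of the individual protocols). Names are made up.
 */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
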
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
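
/*
 * Editor's note: hedged sketch, not part of sock.c, of a protocol module
 * registering its struct proto at init time and unregistering it on
 * exit. The minimal proto definition below is hypothetical; a usable
 * protocol would fill in many more fields.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	/* alloc_slab != 0 asks proto_register() to create a kmem cache */
	return proto_register(&example_proto, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}
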
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */