sock.c revision 68835aba4d9b74e2f94106d13b6a4bddc447c4c8
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Generic socket support routines. Memory allocators, socket lock/release 7 * handler for protocols to use and generic option handler. 8 * 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Alan Cox, <A.Cox@swansea.ac.uk> 14 * 15 * Fixes: 16 * Alan Cox : Numerous verify_area() problems 17 * Alan Cox : Connecting on a connecting socket 18 * now returns an error for tcp. 19 * Alan Cox : sock->protocol is set correctly. 20 * and is not sometimes left as 0. 21 * Alan Cox : connect handles icmp errors on a 22 * connect properly. Unfortunately there 23 * is a restart syscall nasty there. I 24 * can't match BSD without hacking the C 25 * library. Ideas urgently sought! 26 * Alan Cox : Disallow bind() to addresses that are 27 * not ours - especially broadcast ones!! 28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) 29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, 30 * instead they leave that for the DESTROY timer. 31 * Alan Cox : Clean up error flag in accept 32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 33 * was buggy. Put a remove_sock() in the handler 34 * for memory when we hit 0. Also altered the timer 35 * code. The ACK stuff can wait and needs major 36 * TCP layer surgery. 37 * Alan Cox : Fixed TCP ack bug, removed remove sock 38 * and fixed timer/inet_bh race. 39 * Alan Cox : Added zapped flag for TCP 40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code 41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb 42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources 43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. 44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 45 * Rick Sladkey : Relaxed UDP rules for matching packets. 46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support 47 * Pauline Middelink : identd support 48 * Alan Cox : Fixed connect() taking signals I think. 49 * Alan Cox : SO_LINGER supported 50 * Alan Cox : Error reporting fixes 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 53 * Alan Cox : Split socket option code 54 * Alan Cox : Callbacks 55 * Alan Cox : Nagle flag for Charles & Johannes stuff 56 * Alex : Removed restriction on inet fioctl 57 * Alan Cox : Splitting INET from NET core 58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() 59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code 60 * Alan Cox : Split IP from generic code 61 * Alan Cox : New kfree_skbmem() 62 * Alan Cox : Make SO_DEBUG superuser only. 63 * Alan Cox : Allow anyone to clear SO_DEBUG 64 * (compatibility fix) 65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. 66 * Alan Cox : Allocator for a socket is settable. 67 * Alan Cox : SO_ERROR includes soft errors. 68 * Alan Cox : Allow NULL arguments on some SO_ opts 69 * Alan Cox : Generic socket allocation to make hooks 70 * easier (suggested by Craig Metz). 71 * Michael Pall : SO_ERROR returns positive errno again 72 * Steve Whitehouse: Added default destructor to free 73 * protocol private data. 74 * Steve Whitehouse: Added various other default routines 75 * common to several socket families. 76 * Chris Evans : Call suser() check last on F_SETOWN 77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() 79 * Andi Kleen : Fix write_space callback 80 * Chris Evans : Security fixes - signedness again 81 * Arnaldo C. Melo : cleanups, use skb_queue_purge 82 * 83 * To Fix: 84 * 85 * 86 * This program is free software; you can redistribute it and/or 87 * modify it under the terms of the GNU General Public License 88 * as published by the Free Software Foundation; either version 89 * 2 of the License, or (at your option) any later version. 90 */ 91 92#include <linux/capability.h> 93#include <linux/errno.h> 94#include <linux/types.h> 95#include <linux/socket.h> 96#include <linux/in.h> 97#include <linux/kernel.h> 98#include <linux/module.h> 99#include <linux/proc_fs.h> 100#include <linux/seq_file.h> 101#include <linux/sched.h> 102#include <linux/timer.h> 103#include <linux/string.h> 104#include <linux/sockios.h> 105#include <linux/net.h> 106#include <linux/mm.h> 107#include <linux/slab.h> 108#include <linux/interrupt.h> 109#include <linux/poll.h> 110#include <linux/tcp.h> 111#include <linux/init.h> 112#include <linux/highmem.h> 113#include <linux/user_namespace.h> 114 115#include <asm/uaccess.h> 116#include <asm/system.h> 117 118#include <linux/netdevice.h> 119#include <net/protocol.h> 120#include <linux/skbuff.h> 121#include <net/net_namespace.h> 122#include <net/request_sock.h> 123#include <net/sock.h> 124#include <linux/net_tstamp.h> 125#include <net/xfrm.h> 126#include <linux/ipsec.h> 127#include <net/cls_cgroup.h> 128 129#include <linux/filter.h> 130 131#ifdef CONFIG_INET 132#include <net/tcp.h> 133#endif 134 135/* 136 * Each address family might have different locking rules, so we have 137 * one slock key per address family: 138 */ 139static struct lock_class_key af_family_keys[AF_MAX]; 140static struct lock_class_key af_family_slock_keys[AF_MAX]; 141 142/* 143 * Make lock validator output more readable. (we pre-construct these 144 * strings build-time, so that runtime initialization of socket 145 * locks is fast): 146 */ 147static const char *const af_family_key_strings[AF_MAX+1] = { 148 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 149 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", 150 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , 151 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , 152 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , 153 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , 154 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , 155 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , 156 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , 157 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 158 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 159 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 160 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , 161 "sk_lock-AF_MAX" 162}; 163static const char *const af_family_slock_key_strings[AF_MAX+1] = { 164 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 165 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", 166 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , 167 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , 168 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , 169 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , 170 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , 171 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" , 172 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , 173 "slock-27" , "slock-28" , "slock-AF_CAN" , 174 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 175 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 176 "slock-AF_IEEE802154", "slock-AF_CAIF" , 177 "slock-AF_MAX" 178}; 179static const char *const af_family_clock_key_strings[AF_MAX+1] = { 180 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 181 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 182 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 183 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , 184 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , 185 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , 186 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , 187 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , 188 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 189 "clock-27" , "clock-28" , "clock-AF_CAN" , 190 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 191 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 192 "clock-AF_IEEE802154", "clock-AF_CAIF" , 193 "clock-AF_MAX" 194}; 195 196/* 197 * sk_callback_lock locking rules are per-address-family, 198 * so split the lock classes by using a per-AF key: 199 */ 200static struct lock_class_key af_callback_keys[AF_MAX]; 201 202/* Take into consideration the size of the struct sk_buff overhead in the 203 * determination of these values, since that is non-constant across 204 * platforms. This makes socket queueing behavior and performance 205 * not depend upon such differences. 206 */ 207#define _SK_MEM_PACKETS 256 208#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256) 209#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 210#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 211 212/* Run time adjustable parameters. */ 213__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; 214__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; 215__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 216__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 217 218/* Maximal space eaten by iovec or ancilliary data plus some space */ 219int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 220EXPORT_SYMBOL(sysctl_optmem_max); 221 222#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) 223int net_cls_subsys_id = -1; 224EXPORT_SYMBOL_GPL(net_cls_subsys_id); 225#endif 226 227static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 228{ 229 struct timeval tv; 230 231 if (optlen < sizeof(tv)) 232 return -EINVAL; 233 if (copy_from_user(&tv, optval, sizeof(tv))) 234 return -EFAULT; 235 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) 236 return -EDOM; 237 238 if (tv.tv_sec < 0) { 239 static int warned __read_mostly; 240 241 *timeo_p = 0; 242 if (warned < 10 && net_ratelimit()) { 243 warned++; 244 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " 245 "tries to set negative timeout\n", 246 current->comm, task_pid_nr(current)); 247 } 248 return 0; 249 } 250 *timeo_p = MAX_SCHEDULE_TIMEOUT; 251 if (tv.tv_sec == 0 && tv.tv_usec == 0) 252 return 0; 253 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1)) 254 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ); 255 return 0; 256} 257 258static void sock_warn_obsolete_bsdism(const char *name) 259{ 260 static int warned; 261 static char warncomm[TASK_COMM_LEN]; 262 if (strcmp(warncomm, current->comm) && warned < 5) { 263 strcpy(warncomm, current->comm); 264 printk(KERN_WARNING "process `%s' is using obsolete " 265 "%s SO_BSDCOMPAT\n", warncomm, name); 266 warned++; 267 } 268} 269 270static void sock_disable_timestamp(struct sock *sk, int flag) 271{ 272 if (sock_flag(sk, flag)) { 273 sock_reset_flag(sk, flag); 274 if (!sock_flag(sk, SOCK_TIMESTAMP) && 275 !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) { 276 net_disable_timestamp(); 277 } 278 } 279} 280 281 282int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 283{ 284 int err; 285 int skb_len; 286 unsigned long flags; 287 struct sk_buff_head *list = &sk->sk_receive_queue; 288 289 /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces 290 number of warnings when compiling with -W --ANK 291 */ 292 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 293 (unsigned)sk->sk_rcvbuf) { 294 atomic_inc(&sk->sk_drops); 295 return -ENOMEM; 296 } 297 298 err = sk_filter(sk, skb); 299 if (err) 300 return err; 301 302 if (!sk_rmem_schedule(sk, skb->truesize)) { 303 atomic_inc(&sk->sk_drops); 304 return -ENOBUFS; 305 } 306 307 skb->dev = NULL; 308 skb_set_owner_r(skb, sk); 309 310 /* Cache the SKB length before we tack it onto the receive 311 * queue. Once it is added it no longer belongs to us and 312 * may be freed by other threads of control pulling packets 313 * from the queue. 314 */ 315 skb_len = skb->len; 316 317 /* we escape from rcu protected region, make sure we dont leak 318 * a norefcounted dst 319 */ 320 skb_dst_force(skb); 321 322 spin_lock_irqsave(&list->lock, flags); 323 skb->dropcount = atomic_read(&sk->sk_drops); 324 __skb_queue_tail(list, skb); 325 spin_unlock_irqrestore(&list->lock, flags); 326 327 if (!sock_flag(sk, SOCK_DEAD)) 328 sk->sk_data_ready(sk, skb_len); 329 return 0; 330} 331EXPORT_SYMBOL(sock_queue_rcv_skb); 332 333int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 334{ 335 int rc = NET_RX_SUCCESS; 336 337 if (sk_filter(sk, skb)) 338 goto discard_and_relse; 339 340 skb->dev = NULL; 341 342 if (sk_rcvqueues_full(sk, skb)) { 343 atomic_inc(&sk->sk_drops); 344 goto discard_and_relse; 345 } 346 if (nested) 347 bh_lock_sock_nested(sk); 348 else 349 bh_lock_sock(sk); 350 if (!sock_owned_by_user(sk)) { 351 /* 352 * trylock + unlock semantics: 353 */ 354 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 355 356 rc = sk_backlog_rcv(sk, skb); 357 358 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 359 } else if (sk_add_backlog(sk, skb)) { 360 bh_unlock_sock(sk); 361 atomic_inc(&sk->sk_drops); 362 goto discard_and_relse; 363 } 364 365 bh_unlock_sock(sk); 366out: 367 sock_put(sk); 368 return rc; 369discard_and_relse: 370 kfree_skb(skb); 371 goto out; 372} 373EXPORT_SYMBOL(sk_receive_skb); 374 375void sk_reset_txq(struct sock *sk) 376{ 377 sk_tx_queue_clear(sk); 378} 379EXPORT_SYMBOL(sk_reset_txq); 380 381struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 382{ 383 struct dst_entry *dst = __sk_dst_get(sk); 384 385 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 386 sk_tx_queue_clear(sk); 387 rcu_assign_pointer(sk->sk_dst_cache, NULL); 388 dst_release(dst); 389 return NULL; 390 } 391 392 return dst; 393} 394EXPORT_SYMBOL(__sk_dst_check); 395 396struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 397{ 398 struct dst_entry *dst = sk_dst_get(sk); 399 400 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 401 sk_dst_reset(sk); 402 dst_release(dst); 403 return NULL; 404 } 405 406 return dst; 407} 408EXPORT_SYMBOL(sk_dst_check); 409 410static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) 411{ 412 int ret = -ENOPROTOOPT; 413#ifdef CONFIG_NETDEVICES 414 struct net *net = sock_net(sk); 415 char devname[IFNAMSIZ]; 416 int index; 417 418 /* Sorry... */ 419 ret = -EPERM; 420 if (!capable(CAP_NET_RAW)) 421 goto out; 422 423 ret = -EINVAL; 424 if (optlen < 0) 425 goto out; 426 427 /* Bind this socket to a particular device like "eth0", 428 * as specified in the passed interface name. If the 429 * name is "" or the option length is zero the socket 430 * is not bound. 431 */ 432 if (optlen > IFNAMSIZ - 1) 433 optlen = IFNAMSIZ - 1; 434 memset(devname, 0, sizeof(devname)); 435 436 ret = -EFAULT; 437 if (copy_from_user(devname, optval, optlen)) 438 goto out; 439 440 index = 0; 441 if (devname[0] != '\0') { 442 struct net_device *dev; 443 444 rcu_read_lock(); 445 dev = dev_get_by_name_rcu(net, devname); 446 if (dev) 447 index = dev->ifindex; 448 rcu_read_unlock(); 449 ret = -ENODEV; 450 if (!dev) 451 goto out; 452 } 453 454 lock_sock(sk); 455 sk->sk_bound_dev_if = index; 456 sk_dst_reset(sk); 457 release_sock(sk); 458 459 ret = 0; 460 461out: 462#endif 463 464 return ret; 465} 466 467static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) 468{ 469 if (valbool) 470 sock_set_flag(sk, bit); 471 else 472 sock_reset_flag(sk, bit); 473} 474 475/* 476 * This is meant for all protocols to use and covers goings on 477 * at the socket level. Everything here is generic. 478 */ 479 480int sock_setsockopt(struct socket *sock, int level, int optname, 481 char __user *optval, unsigned int optlen) 482{ 483 struct sock *sk = sock->sk; 484 int val; 485 int valbool; 486 struct linger ling; 487 int ret = 0; 488 489 /* 490 * Options without arguments 491 */ 492 493 if (optname == SO_BINDTODEVICE) 494 return sock_bindtodevice(sk, optval, optlen); 495 496 if (optlen < sizeof(int)) 497 return -EINVAL; 498 499 if (get_user(val, (int __user *)optval)) 500 return -EFAULT; 501 502 valbool = val ? 1 : 0; 503 504 lock_sock(sk); 505 506 switch (optname) { 507 case SO_DEBUG: 508 if (val && !capable(CAP_NET_ADMIN)) 509 ret = -EACCES; 510 else 511 sock_valbool_flag(sk, SOCK_DBG, valbool); 512 break; 513 case SO_REUSEADDR: 514 sk->sk_reuse = valbool; 515 break; 516 case SO_TYPE: 517 case SO_PROTOCOL: 518 case SO_DOMAIN: 519 case SO_ERROR: 520 ret = -ENOPROTOOPT; 521 break; 522 case SO_DONTROUTE: 523 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 524 break; 525 case SO_BROADCAST: 526 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 527 break; 528 case SO_SNDBUF: 529 /* Don't error on this BSD doesn't and if you think 530 about it this is right. Otherwise apps have to 531 play 'guess the biggest size' games. RCVBUF/SNDBUF 532 are treated in BSD as hints */ 533 534 if (val > sysctl_wmem_max) 535 val = sysctl_wmem_max; 536set_sndbuf: 537 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 538 if ((val * 2) < SOCK_MIN_SNDBUF) 539 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 540 else 541 sk->sk_sndbuf = val * 2; 542 543 /* 544 * Wake up sending tasks if we 545 * upped the value. 546 */ 547 sk->sk_write_space(sk); 548 break; 549 550 case SO_SNDBUFFORCE: 551 if (!capable(CAP_NET_ADMIN)) { 552 ret = -EPERM; 553 break; 554 } 555 goto set_sndbuf; 556 557 case SO_RCVBUF: 558 /* Don't error on this BSD doesn't and if you think 559 about it this is right. Otherwise apps have to 560 play 'guess the biggest size' games. RCVBUF/SNDBUF 561 are treated in BSD as hints */ 562 563 if (val > sysctl_rmem_max) 564 val = sysctl_rmem_max; 565set_rcvbuf: 566 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 567 /* 568 * We double it on the way in to account for 569 * "struct sk_buff" etc. overhead. Applications 570 * assume that the SO_RCVBUF setting they make will 571 * allow that much actual data to be received on that 572 * socket. 573 * 574 * Applications are unaware that "struct sk_buff" and 575 * other overheads allocate from the receive buffer 576 * during socket buffer allocation. 577 * 578 * And after considering the possible alternatives, 579 * returning the value we actually used in getsockopt 580 * is the most desirable behavior. 581 */ 582 if ((val * 2) < SOCK_MIN_RCVBUF) 583 sk->sk_rcvbuf = SOCK_MIN_RCVBUF; 584 else 585 sk->sk_rcvbuf = val * 2; 586 break; 587 588 case SO_RCVBUFFORCE: 589 if (!capable(CAP_NET_ADMIN)) { 590 ret = -EPERM; 591 break; 592 } 593 goto set_rcvbuf; 594 595 case SO_KEEPALIVE: 596#ifdef CONFIG_INET 597 if (sk->sk_protocol == IPPROTO_TCP) 598 tcp_set_keepalive(sk, valbool); 599#endif 600 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 601 break; 602 603 case SO_OOBINLINE: 604 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 605 break; 606 607 case SO_NO_CHECK: 608 sk->sk_no_check = valbool; 609 break; 610 611 case SO_PRIORITY: 612 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 613 sk->sk_priority = val; 614 else 615 ret = -EPERM; 616 break; 617 618 case SO_LINGER: 619 if (optlen < sizeof(ling)) { 620 ret = -EINVAL; /* 1003.1g */ 621 break; 622 } 623 if (copy_from_user(&ling, optval, sizeof(ling))) { 624 ret = -EFAULT; 625 break; 626 } 627 if (!ling.l_onoff) 628 sock_reset_flag(sk, SOCK_LINGER); 629 else { 630#if (BITS_PER_LONG == 32) 631 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 632 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 633 else 634#endif 635 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; 636 sock_set_flag(sk, SOCK_LINGER); 637 } 638 break; 639 640 case SO_BSDCOMPAT: 641 sock_warn_obsolete_bsdism("setsockopt"); 642 break; 643 644 case SO_PASSCRED: 645 if (valbool) 646 set_bit(SOCK_PASSCRED, &sock->flags); 647 else 648 clear_bit(SOCK_PASSCRED, &sock->flags); 649 break; 650 651 case SO_TIMESTAMP: 652 case SO_TIMESTAMPNS: 653 if (valbool) { 654 if (optname == SO_TIMESTAMP) 655 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 656 else 657 sock_set_flag(sk, SOCK_RCVTSTAMPNS); 658 sock_set_flag(sk, SOCK_RCVTSTAMP); 659 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 660 } else { 661 sock_reset_flag(sk, SOCK_RCVTSTAMP); 662 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 663 } 664 break; 665 666 case SO_TIMESTAMPING: 667 if (val & ~SOF_TIMESTAMPING_MASK) { 668 ret = -EINVAL; 669 break; 670 } 671 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE, 672 val & SOF_TIMESTAMPING_TX_HARDWARE); 673 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE, 674 val & SOF_TIMESTAMPING_TX_SOFTWARE); 675 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE, 676 val & SOF_TIMESTAMPING_RX_HARDWARE); 677 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 678 sock_enable_timestamp(sk, 679 SOCK_TIMESTAMPING_RX_SOFTWARE); 680 else 681 sock_disable_timestamp(sk, 682 SOCK_TIMESTAMPING_RX_SOFTWARE); 683 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, 684 val & SOF_TIMESTAMPING_SOFTWARE); 685 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, 686 val & SOF_TIMESTAMPING_SYS_HARDWARE); 687 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE, 688 val & SOF_TIMESTAMPING_RAW_HARDWARE); 689 break; 690 691 case SO_RCVLOWAT: 692 if (val < 0) 693 val = INT_MAX; 694 sk->sk_rcvlowat = val ? : 1; 695 break; 696 697 case SO_RCVTIMEO: 698 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); 699 break; 700 701 case SO_SNDTIMEO: 702 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); 703 break; 704 705 case SO_ATTACH_FILTER: 706 ret = -EINVAL; 707 if (optlen == sizeof(struct sock_fprog)) { 708 struct sock_fprog fprog; 709 710 ret = -EFAULT; 711 if (copy_from_user(&fprog, optval, sizeof(fprog))) 712 break; 713 714 ret = sk_attach_filter(&fprog, sk); 715 } 716 break; 717 718 case SO_DETACH_FILTER: 719 ret = sk_detach_filter(sk); 720 break; 721 722 case SO_PASSSEC: 723 if (valbool) 724 set_bit(SOCK_PASSSEC, &sock->flags); 725 else 726 clear_bit(SOCK_PASSSEC, &sock->flags); 727 break; 728 case SO_MARK: 729 if (!capable(CAP_NET_ADMIN)) 730 ret = -EPERM; 731 else 732 sk->sk_mark = val; 733 break; 734 735 /* We implement the SO_SNDLOWAT etc to 736 not be settable (1003.1g 5.3) */ 737 case SO_RXQ_OVFL: 738 if (valbool) 739 sock_set_flag(sk, SOCK_RXQ_OVFL); 740 else 741 sock_reset_flag(sk, SOCK_RXQ_OVFL); 742 break; 743 default: 744 ret = -ENOPROTOOPT; 745 break; 746 } 747 release_sock(sk); 748 return ret; 749} 750EXPORT_SYMBOL(sock_setsockopt); 751 752 753void cred_to_ucred(struct pid *pid, const struct cred *cred, 754 struct ucred *ucred) 755{ 756 ucred->pid = pid_vnr(pid); 757 ucred->uid = ucred->gid = -1; 758 if (cred) { 759 struct user_namespace *current_ns = current_user_ns(); 760 761 ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); 762 ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); 763 } 764} 765EXPORT_SYMBOL_GPL(cred_to_ucred); 766 767int sock_getsockopt(struct socket *sock, int level, int optname, 768 char __user *optval, int __user *optlen) 769{ 770 struct sock *sk = sock->sk; 771 772 union { 773 int val; 774 struct linger ling; 775 struct timeval tm; 776 } v; 777 778 int lv = sizeof(int); 779 int len; 780 781 if (get_user(len, optlen)) 782 return -EFAULT; 783 if (len < 0) 784 return -EINVAL; 785 786 memset(&v, 0, sizeof(v)); 787 788 switch (optname) { 789 case SO_DEBUG: 790 v.val = sock_flag(sk, SOCK_DBG); 791 break; 792 793 case SO_DONTROUTE: 794 v.val = sock_flag(sk, SOCK_LOCALROUTE); 795 break; 796 797 case SO_BROADCAST: 798 v.val = !!sock_flag(sk, SOCK_BROADCAST); 799 break; 800 801 case SO_SNDBUF: 802 v.val = sk->sk_sndbuf; 803 break; 804 805 case SO_RCVBUF: 806 v.val = sk->sk_rcvbuf; 807 break; 808 809 case SO_REUSEADDR: 810 v.val = sk->sk_reuse; 811 break; 812 813 case SO_KEEPALIVE: 814 v.val = !!sock_flag(sk, SOCK_KEEPOPEN); 815 break; 816 817 case SO_TYPE: 818 v.val = sk->sk_type; 819 break; 820 821 case SO_PROTOCOL: 822 v.val = sk->sk_protocol; 823 break; 824 825 case SO_DOMAIN: 826 v.val = sk->sk_family; 827 break; 828 829 case SO_ERROR: 830 v.val = -sock_error(sk); 831 if (v.val == 0) 832 v.val = xchg(&sk->sk_err_soft, 0); 833 break; 834 835 case SO_OOBINLINE: 836 v.val = !!sock_flag(sk, SOCK_URGINLINE); 837 break; 838 839 case SO_NO_CHECK: 840 v.val = sk->sk_no_check; 841 break; 842 843 case SO_PRIORITY: 844 v.val = sk->sk_priority; 845 break; 846 847 case SO_LINGER: 848 lv = sizeof(v.ling); 849 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); 850 v.ling.l_linger = sk->sk_lingertime / HZ; 851 break; 852 853 case SO_BSDCOMPAT: 854 sock_warn_obsolete_bsdism("getsockopt"); 855 break; 856 857 case SO_TIMESTAMP: 858 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && 859 !sock_flag(sk, SOCK_RCVTSTAMPNS); 860 break; 861 862 case SO_TIMESTAMPNS: 863 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); 864 break; 865 866 case SO_TIMESTAMPING: 867 v.val = 0; 868 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) 869 v.val |= SOF_TIMESTAMPING_TX_HARDWARE; 870 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) 871 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE; 872 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE)) 873 v.val |= SOF_TIMESTAMPING_RX_HARDWARE; 874 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) 875 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE; 876 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) 877 v.val |= SOF_TIMESTAMPING_SOFTWARE; 878 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)) 879 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE; 880 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) 881 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE; 882 break; 883 884 case SO_RCVTIMEO: 885 lv = sizeof(struct timeval); 886 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { 887 v.tm.tv_sec = 0; 888 v.tm.tv_usec = 0; 889 } else { 890 v.tm.tv_sec = sk->sk_rcvtimeo / HZ; 891 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; 892 } 893 break; 894 895 case SO_SNDTIMEO: 896 lv = sizeof(struct timeval); 897 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { 898 v.tm.tv_sec = 0; 899 v.tm.tv_usec = 0; 900 } else { 901 v.tm.tv_sec = sk->sk_sndtimeo / HZ; 902 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; 903 } 904 break; 905 906 case SO_RCVLOWAT: 907 v.val = sk->sk_rcvlowat; 908 break; 909 910 case SO_SNDLOWAT: 911 v.val = 1; 912 break; 913 914 case SO_PASSCRED: 915 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; 916 break; 917 918 case SO_PEERCRED: 919 { 920 struct ucred peercred; 921 if (len > sizeof(peercred)) 922 len = sizeof(peercred); 923 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 924 if (copy_to_user(optval, &peercred, len)) 925 return -EFAULT; 926 goto lenout; 927 } 928 929 case SO_PEERNAME: 930 { 931 char address[128]; 932 933 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) 934 return -ENOTCONN; 935 if (lv < len) 936 return -EINVAL; 937 if (copy_to_user(optval, address, len)) 938 return -EFAULT; 939 goto lenout; 940 } 941 942 /* Dubious BSD thing... Probably nobody even uses it, but 943 * the UNIX standard wants it for whatever reason... -DaveM 944 */ 945 case SO_ACCEPTCONN: 946 v.val = sk->sk_state == TCP_LISTEN; 947 break; 948 949 case SO_PASSSEC: 950 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; 951 break; 952 953 case SO_PEERSEC: 954 return security_socket_getpeersec_stream(sock, optval, optlen, len); 955 956 case SO_MARK: 957 v.val = sk->sk_mark; 958 break; 959 960 case SO_RXQ_OVFL: 961 v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); 962 break; 963 964 default: 965 return -ENOPROTOOPT; 966 } 967 968 if (len > lv) 969 len = lv; 970 if (copy_to_user(optval, &v, len)) 971 return -EFAULT; 972lenout: 973 if (put_user(len, optlen)) 974 return -EFAULT; 975 return 0; 976} 977 978/* 979 * Initialize an sk_lock. 980 * 981 * (We also register the sk_lock with the lock validator.) 982 */ 983static inline void sock_lock_init(struct sock *sk) 984{ 985 sock_lock_init_class_and_name(sk, 986 af_family_slock_key_strings[sk->sk_family], 987 af_family_slock_keys + sk->sk_family, 988 af_family_key_strings[sk->sk_family], 989 af_family_keys + sk->sk_family); 990} 991 992/* 993 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 994 * even temporarly, because of RCU lookups. sk_node should also be left as is. 995 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 996 */ 997static void sock_copy(struct sock *nsk, const struct sock *osk) 998{ 999#ifdef CONFIG_SECURITY_NETWORK 1000 void *sptr = nsk->sk_security; 1001#endif 1002 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 1003 1004 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 1005 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); 1006 1007#ifdef CONFIG_SECURITY_NETWORK 1008 nsk->sk_security = sptr; 1009 security_sk_clone(osk, nsk); 1010#endif 1011} 1012 1013static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 1014 int family) 1015{ 1016 struct sock *sk; 1017 struct kmem_cache *slab; 1018 1019 slab = prot->slab; 1020 if (slab != NULL) { 1021 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 1022 if (!sk) 1023 return sk; 1024 if (priority & __GFP_ZERO) { 1025 /* 1026 * caches using SLAB_DESTROY_BY_RCU should let 1027 * sk_node.next un-modified. Special care is taken 1028 * when initializing object to zero. 1029 */ 1030 if (offsetof(struct sock, sk_node.next) != 0) 1031 memset(sk, 0, offsetof(struct sock, sk_node.next)); 1032 memset(&sk->sk_node.pprev, 0, 1033 prot->obj_size - offsetof(struct sock, 1034 sk_node.pprev)); 1035 } 1036 } 1037 else 1038 sk = kmalloc(prot->obj_size, priority); 1039 1040 if (sk != NULL) { 1041 kmemcheck_annotate_bitfield(sk, flags); 1042 1043 if (security_sk_alloc(sk, family, priority)) 1044 goto out_free; 1045 1046 if (!try_module_get(prot->owner)) 1047 goto out_free_sec; 1048 sk_tx_queue_clear(sk); 1049 } 1050 1051 return sk; 1052 1053out_free_sec: 1054 security_sk_free(sk); 1055out_free: 1056 if (slab != NULL) 1057 kmem_cache_free(slab, sk); 1058 else 1059 kfree(sk); 1060 return NULL; 1061} 1062 1063static void sk_prot_free(struct proto *prot, struct sock *sk) 1064{ 1065 struct kmem_cache *slab; 1066 struct module *owner; 1067 1068 owner = prot->owner; 1069 slab = prot->slab; 1070 1071 security_sk_free(sk); 1072 if (slab != NULL) 1073 kmem_cache_free(slab, sk); 1074 else 1075 kfree(sk); 1076 module_put(owner); 1077} 1078 1079#ifdef CONFIG_CGROUPS 1080void sock_update_classid(struct sock *sk) 1081{ 1082 u32 classid; 1083 1084 rcu_read_lock(); /* doing current task, which cannot vanish. */ 1085 classid = task_cls_classid(current); 1086 rcu_read_unlock(); 1087 if (classid && classid != sk->sk_classid) 1088 sk->sk_classid = classid; 1089} 1090EXPORT_SYMBOL(sock_update_classid); 1091#endif 1092 1093/** 1094 * sk_alloc - All socket objects are allocated here 1095 * @net: the applicable net namespace 1096 * @family: protocol family 1097 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 1098 * @prot: struct proto associated with this new sock instance 1099 */ 1100struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 1101 struct proto *prot) 1102{ 1103 struct sock *sk; 1104 1105 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 1106 if (sk) { 1107 sk->sk_family = family; 1108 /* 1109 * See comment in struct sock definition to understand 1110 * why we need sk_prot_creator -acme 1111 */ 1112 sk->sk_prot = sk->sk_prot_creator = prot; 1113 sock_lock_init(sk); 1114 sock_net_set(sk, get_net(net)); 1115 atomic_set(&sk->sk_wmem_alloc, 1); 1116 1117 sock_update_classid(sk); 1118 } 1119 1120 return sk; 1121} 1122EXPORT_SYMBOL(sk_alloc); 1123 1124static void __sk_free(struct sock *sk) 1125{ 1126 struct sk_filter *filter; 1127 1128 if (sk->sk_destruct) 1129 sk->sk_destruct(sk); 1130 1131 filter = rcu_dereference_check(sk->sk_filter, 1132 atomic_read(&sk->sk_wmem_alloc) == 0); 1133 if (filter) { 1134 sk_filter_uncharge(sk, filter); 1135 rcu_assign_pointer(sk->sk_filter, NULL); 1136 } 1137 1138 sock_disable_timestamp(sk, SOCK_TIMESTAMP); 1139 sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); 1140 1141 if (atomic_read(&sk->sk_omem_alloc)) 1142 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", 1143 __func__, atomic_read(&sk->sk_omem_alloc)); 1144 1145 if (sk->sk_peer_cred) 1146 put_cred(sk->sk_peer_cred); 1147 put_pid(sk->sk_peer_pid); 1148 put_net(sock_net(sk)); 1149 sk_prot_free(sk->sk_prot_creator, sk); 1150} 1151 1152void sk_free(struct sock *sk) 1153{ 1154 /* 1155 * We substract one from sk_wmem_alloc and can know if 1156 * some packets are still in some tx queue. 1157 * If not null, sock_wfree() will call __sk_free(sk) later 1158 */ 1159 if (atomic_dec_and_test(&sk->sk_wmem_alloc)) 1160 __sk_free(sk); 1161} 1162EXPORT_SYMBOL(sk_free); 1163 1164/* 1165 * Last sock_put should drop referrence to sk->sk_net. It has already 1166 * been dropped in sk_change_net. Taking referrence to stopping namespace 1167 * is not an option. 1168 * Take referrence to a socket to remove it from hash _alive_ and after that 1169 * destroy it in the context of init_net. 1170 */ 1171void sk_release_kernel(struct sock *sk) 1172{ 1173 if (sk == NULL || sk->sk_socket == NULL) 1174 return; 1175 1176 sock_hold(sk); 1177 sock_release(sk->sk_socket); 1178 release_net(sock_net(sk)); 1179 sock_net_set(sk, get_net(&init_net)); 1180 sock_put(sk); 1181} 1182EXPORT_SYMBOL(sk_release_kernel); 1183 1184struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 1185{ 1186 struct sock *newsk; 1187 1188 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); 1189 if (newsk != NULL) { 1190 struct sk_filter *filter; 1191 1192 sock_copy(newsk, sk); 1193 1194 /* SANITY */ 1195 get_net(sock_net(newsk)); 1196 sk_node_init(&newsk->sk_node); 1197 sock_lock_init(newsk); 1198 bh_lock_sock(newsk); 1199 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 1200 newsk->sk_backlog.len = 0; 1201 1202 atomic_set(&newsk->sk_rmem_alloc, 0); 1203 /* 1204 * sk_wmem_alloc set to one (see sk_free() and sock_wfree()) 1205 */ 1206 atomic_set(&newsk->sk_wmem_alloc, 1); 1207 atomic_set(&newsk->sk_omem_alloc, 0); 1208 skb_queue_head_init(&newsk->sk_receive_queue); 1209 skb_queue_head_init(&newsk->sk_write_queue); 1210#ifdef CONFIG_NET_DMA 1211 skb_queue_head_init(&newsk->sk_async_wait_queue); 1212#endif 1213 1214 spin_lock_init(&newsk->sk_dst_lock); 1215 rwlock_init(&newsk->sk_callback_lock); 1216 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1217 af_callback_keys + newsk->sk_family, 1218 af_family_clock_key_strings[newsk->sk_family]); 1219 1220 newsk->sk_dst_cache = NULL; 1221 newsk->sk_wmem_queued = 0; 1222 newsk->sk_forward_alloc = 0; 1223 newsk->sk_send_head = NULL; 1224 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1225 1226 sock_reset_flag(newsk, SOCK_DONE); 1227 skb_queue_head_init(&newsk->sk_error_queue); 1228 1229 filter = rcu_dereference_protected(newsk->sk_filter, 1); 1230 if (filter != NULL) 1231 sk_filter_charge(newsk, filter); 1232 1233 if (unlikely(xfrm_sk_clone_policy(newsk))) { 1234 /* It is still raw copy of parent, so invalidate 1235 * destructor and make plain sk_free() */ 1236 newsk->sk_destruct = NULL; 1237 sk_free(newsk); 1238 newsk = NULL; 1239 goto out; 1240 } 1241 1242 newsk->sk_err = 0; 1243 newsk->sk_priority = 0; 1244 /* 1245 * Before updating sk_refcnt, we must commit prior changes to memory 1246 * (Documentation/RCU/rculist_nulls.txt for details) 1247 */ 1248 smp_wmb(); 1249 atomic_set(&newsk->sk_refcnt, 2); 1250 1251 /* 1252 * Increment the counter in the same struct proto as the master 1253 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that 1254 * is the same as sk->sk_prot->socks, as this field was copied 1255 * with memcpy). 1256 * 1257 * This _changes_ the previous behaviour, where 1258 * tcp_create_openreq_child always was incrementing the 1259 * equivalent to tcp_prot->socks (inet_sock_nr), so this have 1260 * to be taken into account in all callers. -acme 1261 */ 1262 sk_refcnt_debug_inc(newsk); 1263 sk_set_socket(newsk, NULL); 1264 newsk->sk_wq = NULL; 1265 1266 if (newsk->sk_prot->sockets_allocated) 1267 percpu_counter_inc(newsk->sk_prot->sockets_allocated); 1268 1269 if (sock_flag(newsk, SOCK_TIMESTAMP) || 1270 sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) 1271 net_enable_timestamp(); 1272 } 1273out: 1274 return newsk; 1275} 1276EXPORT_SYMBOL_GPL(sk_clone); 1277 1278void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 1279{ 1280 __sk_dst_set(sk, dst); 1281 sk->sk_route_caps = dst->dev->features; 1282 if (sk->sk_route_caps & NETIF_F_GSO) 1283 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 1284 sk->sk_route_caps &= ~sk->sk_route_nocaps; 1285 if (sk_can_gso(sk)) { 1286 if (dst->header_len) { 1287 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 1288 } else { 1289 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 1290 sk->sk_gso_max_size = dst->dev->gso_max_size; 1291 } 1292 } 1293} 1294EXPORT_SYMBOL_GPL(sk_setup_caps); 1295 1296void __init sk_init(void) 1297{ 1298 if (totalram_pages <= 4096) { 1299 sysctl_wmem_max = 32767; 1300 sysctl_rmem_max = 32767; 1301 sysctl_wmem_default = 32767; 1302 sysctl_rmem_default = 32767; 1303 } else if (totalram_pages >= 131072) { 1304 sysctl_wmem_max = 131071; 1305 sysctl_rmem_max = 131071; 1306 } 1307} 1308 1309/* 1310 * Simple resource managers for sockets. 1311 */ 1312 1313 1314/* 1315 * Write buffer destructor automatically called from kfree_skb. 1316 */ 1317void sock_wfree(struct sk_buff *skb) 1318{ 1319 struct sock *sk = skb->sk; 1320 unsigned int len = skb->truesize; 1321 1322 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 1323 /* 1324 * Keep a reference on sk_wmem_alloc, this will be released 1325 * after sk_write_space() call 1326 */ 1327 atomic_sub(len - 1, &sk->sk_wmem_alloc); 1328 sk->sk_write_space(sk); 1329 len = 1; 1330 } 1331 /* 1332 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 1333 * could not do because of in-flight packets 1334 */ 1335 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) 1336 __sk_free(sk); 1337} 1338EXPORT_SYMBOL(sock_wfree); 1339 1340/* 1341 * Read buffer destructor automatically called from kfree_skb. 1342 */ 1343void sock_rfree(struct sk_buff *skb) 1344{ 1345 struct sock *sk = skb->sk; 1346 unsigned int len = skb->truesize; 1347 1348 atomic_sub(len, &sk->sk_rmem_alloc); 1349 sk_mem_uncharge(sk, len); 1350} 1351EXPORT_SYMBOL(sock_rfree); 1352 1353 1354int sock_i_uid(struct sock *sk) 1355{ 1356 int uid; 1357 1358 read_lock_bh(&sk->sk_callback_lock); 1359 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1360 read_unlock_bh(&sk->sk_callback_lock); 1361 return uid; 1362} 1363EXPORT_SYMBOL(sock_i_uid); 1364 1365unsigned long sock_i_ino(struct sock *sk) 1366{ 1367 unsigned long ino; 1368 1369 read_lock_bh(&sk->sk_callback_lock); 1370 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1371 read_unlock_bh(&sk->sk_callback_lock); 1372 return ino; 1373} 1374EXPORT_SYMBOL(sock_i_ino); 1375 1376/* 1377 * Allocate a skb from the socket's send buffer. 1378 */ 1379struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1380 gfp_t priority) 1381{ 1382 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1383 struct sk_buff *skb = alloc_skb(size, priority); 1384 if (skb) { 1385 skb_set_owner_w(skb, sk); 1386 return skb; 1387 } 1388 } 1389 return NULL; 1390} 1391EXPORT_SYMBOL(sock_wmalloc); 1392 1393/* 1394 * Allocate a skb from the socket's receive buffer. 1395 */ 1396struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 1397 gfp_t priority) 1398{ 1399 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 1400 struct sk_buff *skb = alloc_skb(size, priority); 1401 if (skb) { 1402 skb_set_owner_r(skb, sk); 1403 return skb; 1404 } 1405 } 1406 return NULL; 1407} 1408 1409/* 1410 * Allocate a memory block from the socket's option memory buffer. 1411 */ 1412void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 1413{ 1414 if ((unsigned)size <= sysctl_optmem_max && 1415 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { 1416 void *mem; 1417 /* First do the add, to avoid the race if kmalloc 1418 * might sleep. 1419 */ 1420 atomic_add(size, &sk->sk_omem_alloc); 1421 mem = kmalloc(size, priority); 1422 if (mem) 1423 return mem; 1424 atomic_sub(size, &sk->sk_omem_alloc); 1425 } 1426 return NULL; 1427} 1428EXPORT_SYMBOL(sock_kmalloc); 1429 1430/* 1431 * Free an option memory block. 1432 */ 1433void sock_kfree_s(struct sock *sk, void *mem, int size) 1434{ 1435 kfree(mem); 1436 atomic_sub(size, &sk->sk_omem_alloc); 1437} 1438EXPORT_SYMBOL(sock_kfree_s); 1439 1440/* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 1441 I think, these locks should be removed for datagram sockets. 1442 */ 1443static long sock_wait_for_wmem(struct sock *sk, long timeo) 1444{ 1445 DEFINE_WAIT(wait); 1446 1447 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1448 for (;;) { 1449 if (!timeo) 1450 break; 1451 if (signal_pending(current)) 1452 break; 1453 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1454 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1455 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) 1456 break; 1457 if (sk->sk_shutdown & SEND_SHUTDOWN) 1458 break; 1459 if (sk->sk_err) 1460 break; 1461 timeo = schedule_timeout(timeo); 1462 } 1463 finish_wait(sk_sleep(sk), &wait); 1464 return timeo; 1465} 1466 1467 1468/* 1469 * Generic send/receive buffer handlers 1470 */ 1471 1472struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 1473 unsigned long data_len, int noblock, 1474 int *errcode) 1475{ 1476 struct sk_buff *skb; 1477 gfp_t gfp_mask; 1478 long timeo; 1479 int err; 1480 1481 gfp_mask = sk->sk_allocation; 1482 if (gfp_mask & __GFP_WAIT) 1483 gfp_mask |= __GFP_REPEAT; 1484 1485 timeo = sock_sndtimeo(sk, noblock); 1486 while (1) { 1487 err = sock_error(sk); 1488 if (err != 0) 1489 goto failure; 1490 1491 err = -EPIPE; 1492 if (sk->sk_shutdown & SEND_SHUTDOWN) 1493 goto failure; 1494 1495 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1496 skb = alloc_skb(header_len, gfp_mask); 1497 if (skb) { 1498 int npages; 1499 int i; 1500 1501 /* No pages, we're done... */ 1502 if (!data_len) 1503 break; 1504 1505 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 1506 skb->truesize += data_len; 1507 skb_shinfo(skb)->nr_frags = npages; 1508 for (i = 0; i < npages; i++) { 1509 struct page *page; 1510 skb_frag_t *frag; 1511 1512 page = alloc_pages(sk->sk_allocation, 0); 1513 if (!page) { 1514 err = -ENOBUFS; 1515 skb_shinfo(skb)->nr_frags = i; 1516 kfree_skb(skb); 1517 goto failure; 1518 } 1519 1520 frag = &skb_shinfo(skb)->frags[i]; 1521 frag->page = page; 1522 frag->page_offset = 0; 1523 frag->size = (data_len >= PAGE_SIZE ? 1524 PAGE_SIZE : 1525 data_len); 1526 data_len -= PAGE_SIZE; 1527 } 1528 1529 /* Full success... */ 1530 break; 1531 } 1532 err = -ENOBUFS; 1533 goto failure; 1534 } 1535 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1536 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1537 err = -EAGAIN; 1538 if (!timeo) 1539 goto failure; 1540 if (signal_pending(current)) 1541 goto interrupted; 1542 timeo = sock_wait_for_wmem(sk, timeo); 1543 } 1544 1545 skb_set_owner_w(skb, sk); 1546 return skb; 1547 1548interrupted: 1549 err = sock_intr_errno(timeo); 1550failure: 1551 *errcode = err; 1552 return NULL; 1553} 1554EXPORT_SYMBOL(sock_alloc_send_pskb); 1555 1556struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1557 int noblock, int *errcode) 1558{ 1559 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); 1560} 1561EXPORT_SYMBOL(sock_alloc_send_skb); 1562 1563static void __lock_sock(struct sock *sk) 1564 __releases(&sk->sk_lock.slock) 1565 __acquires(&sk->sk_lock.slock) 1566{ 1567 DEFINE_WAIT(wait); 1568 1569 for (;;) { 1570 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 1571 TASK_UNINTERRUPTIBLE); 1572 spin_unlock_bh(&sk->sk_lock.slock); 1573 schedule(); 1574 spin_lock_bh(&sk->sk_lock.slock); 1575 if (!sock_owned_by_user(sk)) 1576 break; 1577 } 1578 finish_wait(&sk->sk_lock.wq, &wait); 1579} 1580 1581static void __release_sock(struct sock *sk) 1582 __releases(&sk->sk_lock.slock) 1583 __acquires(&sk->sk_lock.slock) 1584{ 1585 struct sk_buff *skb = sk->sk_backlog.head; 1586 1587 do { 1588 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 1589 bh_unlock_sock(sk); 1590 1591 do { 1592 struct sk_buff *next = skb->next; 1593 1594 WARN_ON_ONCE(skb_dst_is_noref(skb)); 1595 skb->next = NULL; 1596 sk_backlog_rcv(sk, skb); 1597 1598 /* 1599 * We are in process context here with softirqs 1600 * disabled, use cond_resched_softirq() to preempt. 1601 * This is safe to do because we've taken the backlog 1602 * queue private: 1603 */ 1604 cond_resched_softirq(); 1605 1606 skb = next; 1607 } while (skb != NULL); 1608 1609 bh_lock_sock(sk); 1610 } while ((skb = sk->sk_backlog.head) != NULL); 1611 1612 /* 1613 * Doing the zeroing here guarantee we can not loop forever 1614 * while a wild producer attempts to flood us. 1615 */ 1616 sk->sk_backlog.len = 0; 1617} 1618 1619/** 1620 * sk_wait_data - wait for data to arrive at sk_receive_queue 1621 * @sk: sock to wait on 1622 * @timeo: for how long 1623 * 1624 * Now socket state including sk->sk_err is changed only under lock, 1625 * hence we may omit checks after joining wait queue. 1626 * We check receive queue before schedule() only as optimization; 1627 * it is very likely that release_sock() added new data. 1628 */ 1629int sk_wait_data(struct sock *sk, long *timeo) 1630{ 1631 int rc; 1632 DEFINE_WAIT(wait); 1633 1634 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1635 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1636 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1637 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1638 finish_wait(sk_sleep(sk), &wait); 1639 return rc; 1640} 1641EXPORT_SYMBOL(sk_wait_data); 1642 1643/** 1644 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 1645 * @sk: socket 1646 * @size: memory size to allocate 1647 * @kind: allocation type 1648 * 1649 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 1650 * rmem allocation. This function assumes that protocols which have 1651 * memory_pressure use sk_wmem_queued as write buffer accounting. 1652 */ 1653int __sk_mem_schedule(struct sock *sk, int size, int kind) 1654{ 1655 struct proto *prot = sk->sk_prot; 1656 int amt = sk_mem_pages(size); 1657 long allocated; 1658 1659 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 1660 allocated = atomic_long_add_return(amt, prot->memory_allocated); 1661 1662 /* Under limit. */ 1663 if (allocated <= prot->sysctl_mem[0]) { 1664 if (prot->memory_pressure && *prot->memory_pressure) 1665 *prot->memory_pressure = 0; 1666 return 1; 1667 } 1668 1669 /* Under pressure. */ 1670 if (allocated > prot->sysctl_mem[1]) 1671 if (prot->enter_memory_pressure) 1672 prot->enter_memory_pressure(sk); 1673 1674 /* Over hard limit. */ 1675 if (allocated > prot->sysctl_mem[2]) 1676 goto suppress_allocation; 1677 1678 /* guarantee minimum buffer size under pressure */ 1679 if (kind == SK_MEM_RECV) { 1680 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) 1681 return 1; 1682 } else { /* SK_MEM_SEND */ 1683 if (sk->sk_type == SOCK_STREAM) { 1684 if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) 1685 return 1; 1686 } else if (atomic_read(&sk->sk_wmem_alloc) < 1687 prot->sysctl_wmem[0]) 1688 return 1; 1689 } 1690 1691 if (prot->memory_pressure) { 1692 int alloc; 1693 1694 if (!*prot->memory_pressure) 1695 return 1; 1696 alloc = percpu_counter_read_positive(prot->sockets_allocated); 1697 if (prot->sysctl_mem[2] > alloc * 1698 sk_mem_pages(sk->sk_wmem_queued + 1699 atomic_read(&sk->sk_rmem_alloc) + 1700 sk->sk_forward_alloc)) 1701 return 1; 1702 } 1703 1704suppress_allocation: 1705 1706 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 1707 sk_stream_moderate_sndbuf(sk); 1708 1709 /* Fail only if socket is _under_ its sndbuf. 1710 * In this case we cannot block, so that we have to fail. 1711 */ 1712 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) 1713 return 1; 1714 } 1715 1716 /* Alas. Undo changes. */ 1717 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1718 atomic_long_sub(amt, prot->memory_allocated); 1719 return 0; 1720} 1721EXPORT_SYMBOL(__sk_mem_schedule); 1722 1723/** 1724 * __sk_reclaim - reclaim memory_allocated 1725 * @sk: socket 1726 */ 1727void __sk_mem_reclaim(struct sock *sk) 1728{ 1729 struct proto *prot = sk->sk_prot; 1730 1731 atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 1732 prot->memory_allocated); 1733 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1734 1735 if (prot->memory_pressure && *prot->memory_pressure && 1736 (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1737 *prot->memory_pressure = 0; 1738} 1739EXPORT_SYMBOL(__sk_mem_reclaim); 1740 1741 1742/* 1743 * Set of default routines for initialising struct proto_ops when 1744 * the protocol does not support a particular function. In certain 1745 * cases where it makes no sense for a protocol to have a "do nothing" 1746 * function, some default processing is provided. 1747 */ 1748 1749int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 1750{ 1751 return -EOPNOTSUPP; 1752} 1753EXPORT_SYMBOL(sock_no_bind); 1754 1755int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 1756 int len, int flags) 1757{ 1758 return -EOPNOTSUPP; 1759} 1760EXPORT_SYMBOL(sock_no_connect); 1761 1762int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 1763{ 1764 return -EOPNOTSUPP; 1765} 1766EXPORT_SYMBOL(sock_no_socketpair); 1767 1768int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 1769{ 1770 return -EOPNOTSUPP; 1771} 1772EXPORT_SYMBOL(sock_no_accept); 1773 1774int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 1775 int *len, int peer) 1776{ 1777 return -EOPNOTSUPP; 1778} 1779EXPORT_SYMBOL(sock_no_getname); 1780 1781unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt) 1782{ 1783 return 0; 1784} 1785EXPORT_SYMBOL(sock_no_poll); 1786 1787int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1788{ 1789 return -EOPNOTSUPP; 1790} 1791EXPORT_SYMBOL(sock_no_ioctl); 1792 1793int sock_no_listen(struct socket *sock, int backlog) 1794{ 1795 return -EOPNOTSUPP; 1796} 1797EXPORT_SYMBOL(sock_no_listen); 1798 1799int sock_no_shutdown(struct socket *sock, int how) 1800{ 1801 return -EOPNOTSUPP; 1802} 1803EXPORT_SYMBOL(sock_no_shutdown); 1804 1805int sock_no_setsockopt(struct socket *sock, int level, int optname, 1806 char __user *optval, unsigned int optlen) 1807{ 1808 return -EOPNOTSUPP; 1809} 1810EXPORT_SYMBOL(sock_no_setsockopt); 1811 1812int sock_no_getsockopt(struct socket *sock, int level, int optname, 1813 char __user *optval, int __user *optlen) 1814{ 1815 return -EOPNOTSUPP; 1816} 1817EXPORT_SYMBOL(sock_no_getsockopt); 1818 1819int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1820 size_t len) 1821{ 1822 return -EOPNOTSUPP; 1823} 1824EXPORT_SYMBOL(sock_no_sendmsg); 1825 1826int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1827 size_t len, int flags) 1828{ 1829 return -EOPNOTSUPP; 1830} 1831EXPORT_SYMBOL(sock_no_recvmsg); 1832 1833int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 1834{ 1835 /* Mirror missing mmap method error code */ 1836 return -ENODEV; 1837} 1838EXPORT_SYMBOL(sock_no_mmap); 1839 1840ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) 1841{ 1842 ssize_t res; 1843 struct msghdr msg = {.msg_flags = flags}; 1844 struct kvec iov; 1845 char *kaddr = kmap(page); 1846 iov.iov_base = kaddr + offset; 1847 iov.iov_len = size; 1848 res = kernel_sendmsg(sock, &msg, &iov, 1, size); 1849 kunmap(page); 1850 return res; 1851} 1852EXPORT_SYMBOL(sock_no_sendpage); 1853 1854/* 1855 * Default Socket Callbacks 1856 */ 1857 1858static void sock_def_wakeup(struct sock *sk) 1859{ 1860 struct socket_wq *wq; 1861 1862 rcu_read_lock(); 1863 wq = rcu_dereference(sk->sk_wq); 1864 if (wq_has_sleeper(wq)) 1865 wake_up_interruptible_all(&wq->wait); 1866 rcu_read_unlock(); 1867} 1868 1869static void sock_def_error_report(struct sock *sk) 1870{ 1871 struct socket_wq *wq; 1872 1873 rcu_read_lock(); 1874 wq = rcu_dereference(sk->sk_wq); 1875 if (wq_has_sleeper(wq)) 1876 wake_up_interruptible_poll(&wq->wait, POLLERR); 1877 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 1878 rcu_read_unlock(); 1879} 1880 1881static void sock_def_readable(struct sock *sk, int len) 1882{ 1883 struct socket_wq *wq; 1884 1885 rcu_read_lock(); 1886 wq = rcu_dereference(sk->sk_wq); 1887 if (wq_has_sleeper(wq)) 1888 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 1889 POLLRDNORM | POLLRDBAND); 1890 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 1891 rcu_read_unlock(); 1892} 1893 1894static void sock_def_write_space(struct sock *sk) 1895{ 1896 struct socket_wq *wq; 1897 1898 rcu_read_lock(); 1899 1900 /* Do not wake up a writer until he can make "significant" 1901 * progress. --DaveM 1902 */ 1903 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { 1904 wq = rcu_dereference(sk->sk_wq); 1905 if (wq_has_sleeper(wq)) 1906 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 1907 POLLWRNORM | POLLWRBAND); 1908 1909 /* Should agree with poll, otherwise some programs break */ 1910 if (sock_writeable(sk)) 1911 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 1912 } 1913 1914 rcu_read_unlock(); 1915} 1916 1917static void sock_def_destruct(struct sock *sk) 1918{ 1919 kfree(sk->sk_protinfo); 1920} 1921 1922void sk_send_sigurg(struct sock *sk) 1923{ 1924 if (sk->sk_socket && sk->sk_socket->file) 1925 if (send_sigurg(&sk->sk_socket->file->f_owner)) 1926 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 1927} 1928EXPORT_SYMBOL(sk_send_sigurg); 1929 1930void sk_reset_timer(struct sock *sk, struct timer_list* timer, 1931 unsigned long expires) 1932{ 1933 if (!mod_timer(timer, expires)) 1934 sock_hold(sk); 1935} 1936EXPORT_SYMBOL(sk_reset_timer); 1937 1938void sk_stop_timer(struct sock *sk, struct timer_list* timer) 1939{ 1940 if (timer_pending(timer) && del_timer(timer)) 1941 __sock_put(sk); 1942} 1943EXPORT_SYMBOL(sk_stop_timer); 1944 1945void sock_init_data(struct socket *sock, struct sock *sk) 1946{ 1947 skb_queue_head_init(&sk->sk_receive_queue); 1948 skb_queue_head_init(&sk->sk_write_queue); 1949 skb_queue_head_init(&sk->sk_error_queue); 1950#ifdef CONFIG_NET_DMA 1951 skb_queue_head_init(&sk->sk_async_wait_queue); 1952#endif 1953 1954 sk->sk_send_head = NULL; 1955 1956 init_timer(&sk->sk_timer); 1957 1958 sk->sk_allocation = GFP_KERNEL; 1959 sk->sk_rcvbuf = sysctl_rmem_default; 1960 sk->sk_sndbuf = sysctl_wmem_default; 1961 sk->sk_state = TCP_CLOSE; 1962 sk_set_socket(sk, sock); 1963 1964 sock_set_flag(sk, SOCK_ZAPPED); 1965 1966 if (sock) { 1967 sk->sk_type = sock->type; 1968 sk->sk_wq = sock->wq; 1969 sock->sk = sk; 1970 } else 1971 sk->sk_wq = NULL; 1972 1973 spin_lock_init(&sk->sk_dst_lock); 1974 rwlock_init(&sk->sk_callback_lock); 1975 lockdep_set_class_and_name(&sk->sk_callback_lock, 1976 af_callback_keys + sk->sk_family, 1977 af_family_clock_key_strings[sk->sk_family]); 1978 1979 sk->sk_state_change = sock_def_wakeup; 1980 sk->sk_data_ready = sock_def_readable; 1981 sk->sk_write_space = sock_def_write_space; 1982 sk->sk_error_report = sock_def_error_report; 1983 sk->sk_destruct = sock_def_destruct; 1984 1985 sk->sk_sndmsg_page = NULL; 1986 sk->sk_sndmsg_off = 0; 1987 1988 sk->sk_peer_pid = NULL; 1989 sk->sk_peer_cred = NULL; 1990 sk->sk_write_pending = 0; 1991 sk->sk_rcvlowat = 1; 1992 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 1993 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 1994 1995 sk->sk_stamp = ktime_set(-1L, 0); 1996 1997 /* 1998 * Before updating sk_refcnt, we must commit prior changes to memory 1999 * (Documentation/RCU/rculist_nulls.txt for details) 2000 */ 2001 smp_wmb(); 2002 atomic_set(&sk->sk_refcnt, 1); 2003 atomic_set(&sk->sk_drops, 0); 2004} 2005EXPORT_SYMBOL(sock_init_data); 2006 2007void lock_sock_nested(struct sock *sk, int subclass) 2008{ 2009 might_sleep(); 2010 spin_lock_bh(&sk->sk_lock.slock); 2011 if (sk->sk_lock.owned) 2012 __lock_sock(sk); 2013 sk->sk_lock.owned = 1; 2014 spin_unlock(&sk->sk_lock.slock); 2015 /* 2016 * The sk_lock has mutex_lock() semantics here: 2017 */ 2018 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 2019 local_bh_enable(); 2020} 2021EXPORT_SYMBOL(lock_sock_nested); 2022 2023void release_sock(struct sock *sk) 2024{ 2025 /* 2026 * The sk_lock has mutex_unlock() semantics: 2027 */ 2028 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 2029 2030 spin_lock_bh(&sk->sk_lock.slock); 2031 if (sk->sk_backlog.tail) 2032 __release_sock(sk); 2033 sk->sk_lock.owned = 0; 2034 if (waitqueue_active(&sk->sk_lock.wq)) 2035 wake_up(&sk->sk_lock.wq); 2036 spin_unlock_bh(&sk->sk_lock.slock); 2037} 2038EXPORT_SYMBOL(release_sock); 2039 2040/** 2041 * lock_sock_fast - fast version of lock_sock 2042 * @sk: socket 2043 * 2044 * This version should be used for very small section, where process wont block 2045 * return false if fast path is taken 2046 * sk_lock.slock locked, owned = 0, BH disabled 2047 * return true if slow path is taken 2048 * sk_lock.slock unlocked, owned = 1, BH enabled 2049 */ 2050bool lock_sock_fast(struct sock *sk) 2051{ 2052 might_sleep(); 2053 spin_lock_bh(&sk->sk_lock.slock); 2054 2055 if (!sk->sk_lock.owned) 2056 /* 2057 * Note : We must disable BH 2058 */ 2059 return false; 2060 2061 __lock_sock(sk); 2062 sk->sk_lock.owned = 1; 2063 spin_unlock(&sk->sk_lock.slock); 2064 /* 2065 * The sk_lock has mutex_lock() semantics here: 2066 */ 2067 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); 2068 local_bh_enable(); 2069 return true; 2070} 2071EXPORT_SYMBOL(lock_sock_fast); 2072 2073int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 2074{ 2075 struct timeval tv; 2076 if (!sock_flag(sk, SOCK_TIMESTAMP)) 2077 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 2078 tv = ktime_to_timeval(sk->sk_stamp); 2079 if (tv.tv_sec == -1) 2080 return -ENOENT; 2081 if (tv.tv_sec == 0) { 2082 sk->sk_stamp = ktime_get_real(); 2083 tv = ktime_to_timeval(sk->sk_stamp); 2084 } 2085 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; 2086} 2087EXPORT_SYMBOL(sock_get_timestamp); 2088 2089int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) 2090{ 2091 struct timespec ts; 2092 if (!sock_flag(sk, SOCK_TIMESTAMP)) 2093 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 2094 ts = ktime_to_timespec(sk->sk_stamp); 2095 if (ts.tv_sec == -1) 2096 return -ENOENT; 2097 if (ts.tv_sec == 0) { 2098 sk->sk_stamp = ktime_get_real(); 2099 ts = ktime_to_timespec(sk->sk_stamp); 2100 } 2101 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; 2102} 2103EXPORT_SYMBOL(sock_get_timestampns); 2104 2105void sock_enable_timestamp(struct sock *sk, int flag) 2106{ 2107 if (!sock_flag(sk, flag)) { 2108 sock_set_flag(sk, flag); 2109 /* 2110 * we just set one of the two flags which require net 2111 * time stamping, but time stamping might have been on 2112 * already because of the other one 2113 */ 2114 if (!sock_flag(sk, 2115 flag == SOCK_TIMESTAMP ? 2116 SOCK_TIMESTAMPING_RX_SOFTWARE : 2117 SOCK_TIMESTAMP)) 2118 net_enable_timestamp(); 2119 } 2120} 2121 2122/* 2123 * Get a socket option on an socket. 2124 * 2125 * FIX: POSIX 1003.1g is very ambiguous here. It states that 2126 * asynchronous errors should be reported by getsockopt. We assume 2127 * this means if you specify SO_ERROR (otherwise whats the point of it). 2128 */ 2129int sock_common_getsockopt(struct socket *sock, int level, int optname, 2130 char __user *optval, int __user *optlen) 2131{ 2132 struct sock *sk = sock->sk; 2133 2134 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2135} 2136EXPORT_SYMBOL(sock_common_getsockopt); 2137 2138#ifdef CONFIG_COMPAT 2139int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, 2140 char __user *optval, int __user *optlen) 2141{ 2142 struct sock *sk = sock->sk; 2143 2144 if (sk->sk_prot->compat_getsockopt != NULL) 2145 return sk->sk_prot->compat_getsockopt(sk, level, optname, 2146 optval, optlen); 2147 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2148} 2149EXPORT_SYMBOL(compat_sock_common_getsockopt); 2150#endif 2151 2152int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 2153 struct msghdr *msg, size_t size, int flags) 2154{ 2155 struct sock *sk = sock->sk; 2156 int addr_len = 0; 2157 int err; 2158 2159 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, 2160 flags & ~MSG_DONTWAIT, &addr_len); 2161 if (err >= 0) 2162 msg->msg_namelen = addr_len; 2163 return err; 2164} 2165EXPORT_SYMBOL(sock_common_recvmsg); 2166 2167/* 2168 * Set socket options on an inet socket. 2169 */ 2170int sock_common_setsockopt(struct socket *sock, int level, int optname, 2171 char __user *optval, unsigned int optlen) 2172{ 2173 struct sock *sk = sock->sk; 2174 2175 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2176} 2177EXPORT_SYMBOL(sock_common_setsockopt); 2178 2179#ifdef CONFIG_COMPAT 2180int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, 2181 char __user *optval, unsigned int optlen) 2182{ 2183 struct sock *sk = sock->sk; 2184 2185 if (sk->sk_prot->compat_setsockopt != NULL) 2186 return sk->sk_prot->compat_setsockopt(sk, level, optname, 2187 optval, optlen); 2188 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2189} 2190EXPORT_SYMBOL(compat_sock_common_setsockopt); 2191#endif 2192 2193void sk_common_release(struct sock *sk) 2194{ 2195 if (sk->sk_prot->destroy) 2196 sk->sk_prot->destroy(sk); 2197 2198 /* 2199 * Observation: when sock_common_release is called, processes have 2200 * no access to socket. But net still has. 2201 * Step one, detach it from networking: 2202 * 2203 * A. Remove from hash tables. 2204 */ 2205 2206 sk->sk_prot->unhash(sk); 2207 2208 /* 2209 * In this point socket cannot receive new packets, but it is possible 2210 * that some packets are in flight because some CPU runs receiver and 2211 * did hash table lookup before we unhashed socket. They will achieve 2212 * receive queue and will be purged by socket destructor. 2213 * 2214 * Also we still have packets pending on receive queue and probably, 2215 * our own packets waiting in device queues. sock_destroy will drain 2216 * receive queue, but transmitted packets will delay socket destruction 2217 * until the last reference will be released. 2218 */ 2219 2220 sock_orphan(sk); 2221 2222 xfrm_sk_free_policy(sk); 2223 2224 sk_refcnt_debug_release(sk); 2225 sock_put(sk); 2226} 2227EXPORT_SYMBOL(sk_common_release); 2228 2229static DEFINE_RWLOCK(proto_list_lock); 2230static LIST_HEAD(proto_list); 2231 2232#ifdef CONFIG_PROC_FS 2233#define PROTO_INUSE_NR 64 /* should be enough for the first time */ 2234struct prot_inuse { 2235 int val[PROTO_INUSE_NR]; 2236}; 2237 2238static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 2239 2240#ifdef CONFIG_NET_NS 2241void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2242{ 2243 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); 2244} 2245EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2246 2247int sock_prot_inuse_get(struct net *net, struct proto *prot) 2248{ 2249 int cpu, idx = prot->inuse_idx; 2250 int res = 0; 2251 2252 for_each_possible_cpu(cpu) 2253 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; 2254 2255 return res >= 0 ? res : 0; 2256} 2257EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2258 2259static int __net_init sock_inuse_init_net(struct net *net) 2260{ 2261 net->core.inuse = alloc_percpu(struct prot_inuse); 2262 return net->core.inuse ? 0 : -ENOMEM; 2263} 2264 2265static void __net_exit sock_inuse_exit_net(struct net *net) 2266{ 2267 free_percpu(net->core.inuse); 2268} 2269 2270static struct pernet_operations net_inuse_ops = { 2271 .init = sock_inuse_init_net, 2272 .exit = sock_inuse_exit_net, 2273}; 2274 2275static __init int net_inuse_init(void) 2276{ 2277 if (register_pernet_subsys(&net_inuse_ops)) 2278 panic("Cannot initialize net inuse counters"); 2279 2280 return 0; 2281} 2282 2283core_initcall(net_inuse_init); 2284#else 2285static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); 2286 2287void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2288{ 2289 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); 2290} 2291EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2292 2293int sock_prot_inuse_get(struct net *net, struct proto *prot) 2294{ 2295 int cpu, idx = prot->inuse_idx; 2296 int res = 0; 2297 2298 for_each_possible_cpu(cpu) 2299 res += per_cpu(prot_inuse, cpu).val[idx]; 2300 2301 return res >= 0 ? res : 0; 2302} 2303EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2304#endif 2305 2306static void assign_proto_idx(struct proto *prot) 2307{ 2308 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 2309 2310 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 2311 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n"); 2312 return; 2313 } 2314 2315 set_bit(prot->inuse_idx, proto_inuse_idx); 2316} 2317 2318static void release_proto_idx(struct proto *prot) 2319{ 2320 if (prot->inuse_idx != PROTO_INUSE_NR - 1) 2321 clear_bit(prot->inuse_idx, proto_inuse_idx); 2322} 2323#else 2324static inline void assign_proto_idx(struct proto *prot) 2325{ 2326} 2327 2328static inline void release_proto_idx(struct proto *prot) 2329{ 2330} 2331#endif 2332 2333int proto_register(struct proto *prot, int alloc_slab) 2334{ 2335 if (alloc_slab) { 2336 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 2337 SLAB_HWCACHE_ALIGN | prot->slab_flags, 2338 NULL); 2339 2340 if (prot->slab == NULL) { 2341 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", 2342 prot->name); 2343 goto out; 2344 } 2345 2346 if (prot->rsk_prot != NULL) { 2347 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); 2348 if (prot->rsk_prot->slab_name == NULL) 2349 goto out_free_sock_slab; 2350 2351 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, 2352 prot->rsk_prot->obj_size, 0, 2353 SLAB_HWCACHE_ALIGN, NULL); 2354 2355 if (prot->rsk_prot->slab == NULL) { 2356 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", 2357 prot->name); 2358 goto out_free_request_sock_slab_name; 2359 } 2360 } 2361 2362 if (prot->twsk_prot != NULL) { 2363 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); 2364 2365 if (prot->twsk_prot->twsk_slab_name == NULL) 2366 goto out_free_request_sock_slab; 2367 2368 prot->twsk_prot->twsk_slab = 2369 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 2370 prot->twsk_prot->twsk_obj_size, 2371 0, 2372 SLAB_HWCACHE_ALIGN | 2373 prot->slab_flags, 2374 NULL); 2375 if (prot->twsk_prot->twsk_slab == NULL) 2376 goto out_free_timewait_sock_slab_name; 2377 } 2378 } 2379 2380 write_lock(&proto_list_lock); 2381 list_add(&prot->node, &proto_list); 2382 assign_proto_idx(prot); 2383 write_unlock(&proto_list_lock); 2384 return 0; 2385 2386out_free_timewait_sock_slab_name: 2387 kfree(prot->twsk_prot->twsk_slab_name); 2388out_free_request_sock_slab: 2389 if (prot->rsk_prot && prot->rsk_prot->slab) { 2390 kmem_cache_destroy(prot->rsk_prot->slab); 2391 prot->rsk_prot->slab = NULL; 2392 } 2393out_free_request_sock_slab_name: 2394 if (prot->rsk_prot) 2395 kfree(prot->rsk_prot->slab_name); 2396out_free_sock_slab: 2397 kmem_cache_destroy(prot->slab); 2398 prot->slab = NULL; 2399out: 2400 return -ENOBUFS; 2401} 2402EXPORT_SYMBOL(proto_register); 2403 2404void proto_unregister(struct proto *prot) 2405{ 2406 write_lock(&proto_list_lock); 2407 release_proto_idx(prot); 2408 list_del(&prot->node); 2409 write_unlock(&proto_list_lock); 2410 2411 if (prot->slab != NULL) { 2412 kmem_cache_destroy(prot->slab); 2413 prot->slab = NULL; 2414 } 2415 2416 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) { 2417 kmem_cache_destroy(prot->rsk_prot->slab); 2418 kfree(prot->rsk_prot->slab_name); 2419 prot->rsk_prot->slab = NULL; 2420 } 2421 2422 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { 2423 kmem_cache_destroy(prot->twsk_prot->twsk_slab); 2424 kfree(prot->twsk_prot->twsk_slab_name); 2425 prot->twsk_prot->twsk_slab = NULL; 2426 } 2427} 2428EXPORT_SYMBOL(proto_unregister); 2429 2430#ifdef CONFIG_PROC_FS 2431static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 2432 __acquires(proto_list_lock) 2433{ 2434 read_lock(&proto_list_lock); 2435 return seq_list_start_head(&proto_list, *pos); 2436} 2437 2438static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2439{ 2440 return seq_list_next(v, &proto_list, pos); 2441} 2442 2443static void proto_seq_stop(struct seq_file *seq, void *v) 2444 __releases(proto_list_lock) 2445{ 2446 read_unlock(&proto_list_lock); 2447} 2448 2449static char proto_method_implemented(const void *method) 2450{ 2451 return method == NULL ? 'n' : 'y'; 2452} 2453 2454static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 2455{ 2456 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 2457 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 2458 proto->name, 2459 proto->obj_size, 2460 sock_prot_inuse_get(seq_file_net(seq), proto), 2461 proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, 2462 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", 2463 proto->max_header, 2464 proto->slab == NULL ? "no" : "yes", 2465 module_name(proto->owner), 2466 proto_method_implemented(proto->close), 2467 proto_method_implemented(proto->connect), 2468 proto_method_implemented(proto->disconnect), 2469 proto_method_implemented(proto->accept), 2470 proto_method_implemented(proto->ioctl), 2471 proto_method_implemented(proto->init), 2472 proto_method_implemented(proto->destroy), 2473 proto_method_implemented(proto->shutdown), 2474 proto_method_implemented(proto->setsockopt), 2475 proto_method_implemented(proto->getsockopt), 2476 proto_method_implemented(proto->sendmsg), 2477 proto_method_implemented(proto->recvmsg), 2478 proto_method_implemented(proto->sendpage), 2479 proto_method_implemented(proto->bind), 2480 proto_method_implemented(proto->backlog_rcv), 2481 proto_method_implemented(proto->hash), 2482 proto_method_implemented(proto->unhash), 2483 proto_method_implemented(proto->get_port), 2484 proto_method_implemented(proto->enter_memory_pressure)); 2485} 2486 2487static int proto_seq_show(struct seq_file *seq, void *v) 2488{ 2489 if (v == &proto_list) 2490 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 2491 "protocol", 2492 "size", 2493 "sockets", 2494 "memory", 2495 "press", 2496 "maxhdr", 2497 "slab", 2498 "module", 2499 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 2500 else 2501 proto_seq_printf(seq, list_entry(v, struct proto, node)); 2502 return 0; 2503} 2504 2505static const struct seq_operations proto_seq_ops = { 2506 .start = proto_seq_start, 2507 .next = proto_seq_next, 2508 .stop = proto_seq_stop, 2509 .show = proto_seq_show, 2510}; 2511 2512static int proto_seq_open(struct inode *inode, struct file *file) 2513{ 2514 return seq_open_net(inode, file, &proto_seq_ops, 2515 sizeof(struct seq_net_private)); 2516} 2517 2518static const struct file_operations proto_seq_fops = { 2519 .owner = THIS_MODULE, 2520 .open = proto_seq_open, 2521 .read = seq_read, 2522 .llseek = seq_lseek, 2523 .release = seq_release_net, 2524}; 2525 2526static __net_init int proto_init_net(struct net *net) 2527{ 2528 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) 2529 return -ENOMEM; 2530 2531 return 0; 2532} 2533 2534static __net_exit void proto_exit_net(struct net *net) 2535{ 2536 proc_net_remove(net, "protocols"); 2537} 2538 2539 2540static __net_initdata struct pernet_operations proto_net_ops = { 2541 .init = proto_init_net, 2542 .exit = proto_exit_net, 2543}; 2544 2545static int __init proto_init(void) 2546{ 2547 return register_pernet_subsys(&proto_net_ops); 2548} 2549 2550subsys_initcall(proto_init); 2551 2552#endif /* PROC_FS */ 2553