sock.c revision e1aab161e0135aafcd439be20b4f35e4b0922d95
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	: 	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/jump_label.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct jump_label_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}
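
/*
 * Editor's note: a minimal userspace sketch (not part of the original file)
 * showing the option that sock_set_timeout() above parses. A zero timeval
 * means "wait forever"; tv_usec must be in [0, USEC_PER_SEC) or setsockopt()
 * fails (the kernel's -EDOM shows up as -1 with errno set).
 *
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *
 *	int set_rcv_timeout(int fd)
 *	{
 *		struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *		// 5 second receive timeout; a blocked recv() then fails
 *		// with EAGAIN/EWOULDBLOCK once it expires.
 *		return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO,
 *				  &tv, sizeof(tv));
 *	}
 */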
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
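
/*
 * Editor's note: a hedged sketch of how a datagram protocol's receive path
 * typically hands an skb to the socket via sock_queue_rcv_skb() above;
 * "myproto" is hypothetical, not from this file. On error the caller still
 * owns the skb and must free it.
 *
 *	static int myproto_rcv_skb(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0) {
 *			// -ENOMEM: receive buffer full; -ENOBUFS: no
 *			// forward-alloc memory. Drop the packet ourselves.
 *			kfree_skb(skb);
 *			return err;
 *		}
 *		return 0;	// queued; sk_data_ready() has woken readers
 *	}
 */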
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}
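
/*
 * Editor's note: userspace sketch (not from the original file) for the
 * SO_BINDTODEVICE handler above. Requires CAP_NET_RAW; an empty string
 * unbinds the socket from any device.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int bind_to_eth0(int fd)
 *	{
 *		const char ifname[] = "eth0";
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *				  ifname, sizeof(ifname));
 *	}
 */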
/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
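
/*
 * Editor's note: hedged userspace sketch of the SO_ATTACH_FILTER case above,
 * attaching a trivial classic-BPF program that accepts every packet
 * (truncated to 64k bytes). Not part of the original file.
 *
 *	#include <linux/filter.h>
 *	#include <sys/socket.h>
 *
 *	int attach_accept_all(int fd)
 *	{
 *		static struct sock_filter code[] = {
 *			BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept
 *		};
 *		struct sock_fprog prog = {
 *			.len	= 1,
 *			.filter	= code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */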
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
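
/*
 * Editor's note: userspace sketch (not from the original file) demonstrating
 * the doubling documented in the SO_RCVBUF handler: the kernel stores twice
 * the requested value to cover struct sk_buff overhead, and getsockopt()
 * reports the value actually in use (requests are capped by the
 * net.core.rmem_max sysctl).
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	void show_rcvbuf(int fd)
 *	{
 *		int val = 65536, out = 0;
 *		socklen_t len = sizeof(out);
 *
 *		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *		getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *		printf("asked for %d, kernel uses %d\n", val, out);
 *	}
 */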
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk)
{
	struct cgroup_netprio_state *state;
	if (in_interrupt())
		return;
	rcu_read_lock();
	state = task_netprio_state(current);
	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
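
/*
 * Editor's note: hedged sketch of the kind of proto definition that makes
 * sk_prot_alloc() above take the RCU-aware clearing path; "my_proto" and
 * "struct my_sock" are hypothetical. A proto whose slab uses
 * SLAB_DESTROY_BY_RCU supplies clear_sk (UDP uses
 * sk_prot_clear_portaddr_nulls for this) so the nulls lookup markers
 * survive a __GFP_ZERO allocation.
 *
 *	struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *		.slab_flags	= SLAB_DESTROY_BY_RCU,
 *		.clear_sk	= sk_prot_clear_portaddr_nulls,
 *	};
 */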
/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not zero, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
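
/*
 * Editor's note: hedged sketch of the usual caller pattern for sk_alloc(),
 * as seen in protocol ->create() handlers; "myproto_create", "PF_MYPROTO"
 * and "my_proto" are illustrative only. sk_alloc() starts sk_wmem_alloc
 * at 1, so a socket with no packets in flight is destroyed by the matching
 * sk_free().
 *
 *	static int myproto_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_MYPROTO, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);	// defined later in this file
 *		return 0;
 *	}
 */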
/*
 * Last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_, and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);
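
/*
 * Editor's note: hedged illustration (not from the original file) of
 * sock_wmalloc() charging the send buffer: the skb's truesize is added to
 * sk_wmem_alloc by skb_set_owner_w(), and sock_wfree() above gives it back
 * and wakes writers when the skb is eventually freed.
 *
 *	struct sk_buff *skb;
 *
 *	// Fails with NULL once sk_wmem_alloc reaches sk_sndbuf,
 *	// unless force is set (e.g. for protocol control packets).
 *	skb = sock_wmalloc(sk, len, 0, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;
 */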
/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
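
/*
 * Editor's note: hedged sketch of the sock_kmalloc()/sock_kfree_s() pair
 * above, as used for per-socket option data; "struct my_opts" is
 * hypothetical. The size must be repeated on free because sk_omem_alloc
 * is a plain byte counter.
 *
 *	struct my_opts *opts;
 *
 *	opts = sock_kmalloc(sk, sizeof(*opts), GFP_KERNEL);
 *	if (!opts)
 *		return -ENOBUFS;	// would exceed sysctl_optmem_max
 *	// ... use opts ...
 *	sock_kfree_s(sk, opts, sizeof(*opts));
 */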
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
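
/*
 * Editor's note: hedged sketch of the canonical recvmsg-side loop around
 * sk_wait_data() above; the socket lock is held by the caller, and the
 * helper drops it while sleeping via sk_wait_event().
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */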
/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
			allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
			allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
			(allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt, parent_status);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
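
/*
 * Editor's note: hedged sketch of how the sock_no_*() stubs above are
 * consumed: a connectionless family fills the operations it does not
 * support with the matching default. "myproto_ops" and the myproto_*
 * handlers are illustrative, and a real table also supplies .release,
 * .poll, .ioctl, .getname and the sockopt methods.
 *
 *	static const struct proto_ops myproto_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.bind		= myproto_bind,
 *		.sendmsg	= myproto_sendmsg,
 *		.recvmsg	= myproto_recvmsg,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */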
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list* timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peer_pid 	=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
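
/*
 * Editor's note: hedged sketch of the timer helpers above. sk_reset_timer()
 * takes a socket reference when it arms an inactive timer, so the callback
 * (or sk_stop_timer()) must drop it; the "myproto" names are illustrative.
 *
 *	static void myproto_timer(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *
 *		// ... periodic work ...
 *		sock_put(sk);	// release the sk_reset_timer() hold
 *	}
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
 */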
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path was taken:
 *         sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *         sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Fast path: return with sk_lock.slock held and BH still
		 * disabled; unlock_sock_fast() undoes both.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
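/*
 * Illustrative sketch (not part of this file): lock_sock_fast() pairs
 * with unlock_sock_fast(), which releases whichever lock state the fast
 * or slow path left behind.  The function below is hypothetical.
 */
#if 0	/* example only */
static void example_reset_drops(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */
	atomic_set(&sk->sk_drops, 0);

	unlock_sock_fast(sk, slow);
}
#endif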
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on a socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
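/*
 * Illustrative sketch (not part of this file): the sock_common_*()
 * helpers let an address family forward socket-level calls straight to
 * its struct proto, so much of a proto_ops table is boilerplate.  The
 * structure below is hypothetical.
 */
#if 0	/* example only */
static const struct proto_ops example_stream_ops = {
	.family		= PF_UNSPEC,			/* hypothetical placeholder */
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,	/* dispatch via sk->sk_prot */
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,		/* also fills in msg_namelen */
};
#endif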
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network still has.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because some CPU
	 * is running the receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	/* Increments and decrements may land on different CPUs, so an
	 * unsynchronized sum can transiently be negative; report zero. */
	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
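/*
 * Illustrative sketch (not part of this file): a protocol's hash/unhash
 * callbacks typically bracket the per-cpu accounting, incrementing on
 * insert and decrementing on removal.  The functions below are
 * hypothetical.
 */
#if 0	/* example only */
static void example_hash(struct sock *sk)
{
	/* ... link sk into the protocol's lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... unlink sk from the lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif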
static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		/* The last slot doubles as an overflow marker; it is never
		 * handed out here and never cleared by release_proto_idx().
		 */
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
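/*
 * Illustrative sketch (not part of this file): a protocol module pairs
 * proto_register() in its init path with proto_unregister() on exit.
 * The proto instance below is hypothetical and heavily abridged.
 */
#if 0	/* example only */
static struct proto example_proto = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),	/* normally a protocol-specific sock */
};

static int __init example_init(void)
{
	/* second argument: also allocate a kmem cache for this proto's socks */
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif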
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	/* "NI": the protocol does not implement pressure accounting */
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}
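/*
 * Legend for the per-method columns above (derived from the
 * proto_seq_printf() argument order): cl=close co=connect di=disconnect
 * ac=accept io=ioctl in=init de=destroy sh=shutdown ss=setsockopt
 * gs=getsockopt se=sendmsg re=recvmsg sp=sendpage bi=bind br=backlog_rcv
 * ha=hash uh=unhash gp=get_port em=enter_memory_pressure.
 */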
static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */