sock.c revision 3bdc0eba0b8b47797f4a76e377dd8360f317450f
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/jump_label.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(cgrp, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct jump_label_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
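
/*
 * Editorial example (not part of the original file): how userspace reaches
 * sock_set_timeout() above. SOL_SOCKET, SO_RCVTIMEO and struct timeval are
 * the standard API; "fd" is assumed to be an open socket.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt");
 *
 * This stores 5 * HZ in sk->sk_rcvtimeo; a blocking recv() then fails with
 * EAGAIN/EWOULDBLOCK once the timeout expires. A zero timeval means "block
 * forever" (MAX_SCHEDULE_TIMEOUT), and a tv_usec outside [0, 1s) is
 * rejected with -EDOM.
 */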

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
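
/*
 * Editorial example (not part of the original file): exercising
 * sock_bindtodevice() from userspace. Requires CAP_NET_RAW; "fd" is an
 * assumed socket descriptor.
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       "eth0", strlen("eth0") + 1) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Traffic on the socket is now restricted to eth0. Passing an empty
 * string (or optlen 0) clears sk->sk_bound_dev_if and unbinds the socket.
 */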

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
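
/*
 * Editorial example (not part of the original file): attaching a minimal
 * classic BPF filter through the SO_ATTACH_FILTER case above. The
 * single-instruction program accepts every packet (it returns 0xffffffff,
 * i.e. "keep up to that many bytes"); "fd" is an assumed socket.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *	struct sock_fprog fprog = {
 *		.len    = 1,
 *		.filter = code,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &fprog, sizeof(fprog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 *
 * Note that optlen must be exactly sizeof(struct sock_fprog), otherwise
 * the case above fails with -EINVAL.
 */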

void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
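
/*
 * Editorial example (not part of the original file): reading the peer's
 * credentials via the SO_PEERCRED case above, on a connected AF_UNIX
 * socket "fd" (an assumption). The kernel fills the reply through
 * cred_to_ucred(); struct ucred needs _GNU_SOURCE in userspace.
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n", cr.pid, cr.uid, cr.gid);
 */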

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not zero, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
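
/*
 * Editorial sketch (not part of the original file): the usual way a
 * protocol's ->create() hook pairs sk_alloc() with sock_init_data()
 * (defined later in this file). "hypothetical_proto" and
 * "hypothetical_create" are invented names for illustration.
 *
 *	static int hypothetical_create(struct net *net, struct socket *sock,
 *				       int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &hypothetical_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		// ... protocol-private setup ...
 *		return 0;
 *	}
 *
 * The matching teardown is sk_free(), which defers __sk_free() until
 * sk_wmem_alloc (preset to 1 in sk_alloc() above) drops to zero.
 */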

/*
 * Last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
					   af_callback_keys + newsk->sk_family,
					   af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
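
/*
 * Editorial sketch (not part of the original file): the locking contract
 * documented above - the clone comes back bh-locked, so the caller must
 * bh_unlock_sock() it on every path, including its own error paths:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		// ... initialise protocol-private state; even on failure:
 *		bh_unlock_sock(newsk);
 *		// ... then sk_free(newsk) if the clone is being discarded
 *	}
 *
 * The inet connection-sock clone helper is an in-tree caller following
 * this pattern.
 */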

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
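
/*
 * Editorial sketch (not part of the original file): sock_kmalloc() and
 * sock_kfree_s() must be used as a pair, and the caller has to remember
 * the size, since sk_omem_alloc is credited back by hand:
 *
 *	void *opt = sock_kmalloc(sk, size, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	// ... use the option buffer ...
 *	sock_kfree_s(sk, opt, size);
 *
 * Option/cmsg handling in the IP code is a typical user.
 */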

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
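
/*
 * Editorial sketch (not part of the original file): the canonical receive
 * loop built on sk_wait_data(), as used by recvmsg implementations
 * ("err" and "flags" are assumed locals):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		if (signal_pending(current)) {
 *			err = sock_intr_errno(timeo);
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 *
 * sk_wait_data() drops the socket lock while sleeping (inside
 * sk_wait_event()), so the queue must be re-checked on each iteration.
 */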

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
			allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
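
/*
 * Editorial worked example (not part of the original file): on a system
 * with a 4096-byte PAGE_SIZE, charging size = 3000 bytes gives
 *
 *	amt = sk_mem_pages(3000) = 1,
 *	sk->sk_forward_alloc += 1 * SK_MEM_QUANTUM = 4096 bytes,
 *	memory_allocated += 1 page.
 *
 * The caller consumes 3000 of those bytes; the remaining forward credit
 * satisfies later sk_mem_charge() calls without touching the global
 * counter, and __sk_mem_reclaim() above returns whole unused quanta.
 */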

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
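
/*
 * Editorial sketch (not part of the original file): a protocol that does
 * not support an operation simply wires the matching sock_no_*() stub
 * into its proto_ops. The names here are invented; raw and packet
 * sockets follow the same pattern in-tree:
 *
 *	static const struct proto_ops hypothetical_ops = {
 *		.family     = PF_PACKET,
 *		.owner      = THIS_MODULE,
 *		.connect    = sock_no_connect,
 *		.socketpair = sock_no_socketpair,
 *		.accept     = sock_no_accept,
 *		.listen     = sock_no_listen,
 *		.shutdown   = sock_no_shutdown,
 *		.mmap       = sock_no_mmap,
 *		.sendpage   = sock_no_sendpage,
 *		// .release, .bind, .sendmsg, .recvmsg are real handlers
 *	};
 */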

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list* timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
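
/*
 * Editorial sketch (not part of the original file): sk_reset_timer() /
 * sk_stop_timer() keep the sock pinned while a timer is in flight. A
 * protocol arms the timer and later cancels it ("my_timeout" is an
 * invented interval):
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + my_timeout);
 *	// ... later, in teardown or when the work is done:
 *	sk_stop_timer(sk, &sk->sk_timer);
 *
 * The timer handler itself must call sock_put() (or re-arm) when it
 * fires, mirroring the sock_hold() taken in sk_reset_timer().
 */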
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BHs disabled.
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BHs enabled.
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: on the fast path we return with BHs still disabled.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
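
/*
 * Illustrative sketch (hypothetical, compiled out): a caller that only
 * needs the lock for a short, non-blocking section pairs lock_sock_fast()
 * with unlock_sock_fast(), passing back the returned slow-path indication
 * so the matching unlock is chosen.  example_peek_len() is an invented
 * name.
 */
#if 0
static int example_peek_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
	return len;
}
#endif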
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}
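
/*
 * Illustrative sketch (hypothetical, compiled out): protocol ioctl handlers
 * typically answer SIOCGSTAMP/SIOCGSTAMPNS by forwarding to
 * sock_get_timestamp()/sock_get_timestampns() above, as inet_ioctl() does.
 * example_ioctl() is an invented name.
 */
#if 0
static int example_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif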
/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
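
/*
 * Illustrative sketch (hypothetical, compiled out): protocols that keep
 * option handling and receive logic in struct proto can point their
 * proto_ops entries at the sock_common_*() wrappers above, which simply
 * dispatch through sk->sk_prot.  PF_EXAMPLE, example_release() and
 * example_sendmsg() are invented names for illustration.
 */
#if 0
static const struct proto_ops example_common_ops = {
	.family		= PF_EXAMPLE,
	.owner		= THIS_MODULE,
	.release	= example_release,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= sock_common_recvmsg,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
#endif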
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network stack still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because some CPU
	 * runs the receiver and did its hash-table lookup before we unhashed
	 * the socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also, we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. The destructor
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif
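
/*
 * Illustrative sketch (hypothetical, compiled out): protocols account a
 * socket in the per-cpu "inuse" counters from their hash/unhash callbacks;
 * sock_prot_inuse_get() then feeds the "sockets" column of
 * /proc/net/protocols.  example_hash()/example_unhash() and the
 * example_hashtable_*() helpers are invented names.
 */
#if 0
static void example_hash(struct sock *sk)
{
	example_hashtable_insert(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	example_hashtable_remove(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif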
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
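
/*
 * Illustrative sketch (hypothetical, compiled out): a protocol module
 * typically calls proto_register() with alloc_slab != 0 so its socks come
 * from a dedicated kmem cache, and undoes it with proto_unregister() on
 * the failure and exit paths.  example_proto, example_family_ops and
 * PF_EXAMPLE are invented names.
 */
#if 0
static int __init example_init(void)
{
	int rc = proto_register(&example_proto, 1);

	if (rc)
		return rc;

	rc = sock_register(&example_family_ops);
	if (rc)
		proto_unregister(&example_proto);
	return rc;
}

static void __exit example_exit(void)
{
	sock_unregister(PF_EXAMPLE);
	proto_unregister(&example_proto);
}

module_init(example_init);
module_exit(example_exit);
#endif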
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= proto_seq_next,
	.stop	= proto_seq_stop,
	.show	= proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */