sock.c revision ef64a54f6e558155b4f149bb10666b9e914b6c54
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/jump_label.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
        struct proto *proto;
        int ret = 0;

        mutex_lock(&proto_list_mutex);
        list_for_each_entry(proto, &proto_list, node) {
                if (proto->init_cgroup) {
                        ret = proto->init_cgroup(cgrp, ss);
                        if (ret)
                                goto out;
                }
        }

        mutex_unlock(&proto_list_mutex);
        return ret;
out:
        list_for_each_entry_continue_reverse(proto, &proto_list, node)
                if (proto->destroy_cgroup)
                        proto->destroy_cgroup(cgrp, ss);
        mutex_unlock(&proto_list_mutex);
        return ret;
}

void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
        struct proto *proto;

        mutex_lock(&proto_list_mutex);
        list_for_each_entry_reverse(proto, &proto_list, node)
                if (proto->destroy_cgroup)
                        proto->destroy_cgroup(cgrp, ss);
        mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct jump_label_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
        "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
        "sk_lock-AF_AX25", "sk_lock-AF_IPX", "sk_lock-AF_APPLETALK",
        "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE", "sk_lock-AF_ATMPVC",
        "sk_lock-AF_X25", "sk_lock-AF_INET6", "sk_lock-AF_ROSE",
        "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI", "sk_lock-AF_SECURITY",
        "sk_lock-AF_KEY", "sk_lock-AF_NETLINK", "sk_lock-AF_PACKET",
        "sk_lock-AF_ASH", "sk_lock-AF_ECONET", "sk_lock-AF_ATMSVC",
        "sk_lock-AF_RDS", "sk_lock-AF_SNA", "sk_lock-AF_IRDA",
        "sk_lock-AF_PPPOX", "sk_lock-AF_WANPIPE", "sk_lock-AF_LLC",
        "sk_lock-27", "sk_lock-28", "sk_lock-AF_CAN",
        "sk_lock-AF_TIPC", "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV",
        "sk_lock-AF_RXRPC", "sk_lock-AF_ISDN", "sk_lock-AF_PHONET",
        "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF", "sk_lock-AF_ALG",
        "sk_lock-AF_NFC", "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
        "slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
        "slock-AF_AX25", "slock-AF_IPX", "slock-AF_APPLETALK",
        "slock-AF_NETROM", "slock-AF_BRIDGE", "slock-AF_ATMPVC",
        "slock-AF_X25", "slock-AF_INET6", "slock-AF_ROSE",
        "slock-AF_DECnet", "slock-AF_NETBEUI", "slock-AF_SECURITY",
        "slock-AF_KEY", "slock-AF_NETLINK", "slock-AF_PACKET",
        "slock-AF_ASH", "slock-AF_ECONET", "slock-AF_ATMSVC",
        "slock-AF_RDS", "slock-AF_SNA", "slock-AF_IRDA",
        "slock-AF_PPPOX", "slock-AF_WANPIPE", "slock-AF_LLC",
        "slock-27", "slock-28", "slock-AF_CAN",
        "slock-AF_TIPC", "slock-AF_BLUETOOTH", "slock-AF_IUCV",
        "slock-AF_RXRPC", "slock-AF_ISDN", "slock-AF_PHONET",
        "slock-AF_IEEE802154", "slock-AF_CAIF", "slock-AF_ALG",
        "slock-AF_NFC", "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
        "clock-AF_UNSPEC", "clock-AF_UNIX", "clock-AF_INET",
        "clock-AF_AX25", "clock-AF_IPX", "clock-AF_APPLETALK",
        "clock-AF_NETROM", "clock-AF_BRIDGE", "clock-AF_ATMPVC",
        "clock-AF_X25", "clock-AF_INET6", "clock-AF_ROSE",
        "clock-AF_DECnet", "clock-AF_NETBEUI", "clock-AF_SECURITY",
        "clock-AF_KEY", "clock-AF_NETLINK", "clock-AF_PACKET",
        "clock-AF_ASH", "clock-AF_ECONET", "clock-AF_ATMSVC",
        "clock-AF_RDS", "clock-AF_SNA", "clock-AF_IRDA",
        "clock-AF_PPPOX", "clock-AF_WANPIPE", "clock-AF_LLC",
        "clock-27", "clock-28", "clock-AF_CAN",
        "clock-AF_TIPC", "clock-AF_BLUETOOTH", "clock-AF_IUCV",
        "clock-AF_RXRPC", "clock-AF_ISDN", "clock-AF_PHONET",
        "clock-AF_IEEE802154", "clock-AF_CAIF", "clock-AF_ALG",
        "clock-AF_NFC", "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
        struct timeval tv;

        if (optlen < sizeof(tv))
                return -EINVAL;
        if (copy_from_user(&tv, optval, sizeof(tv)))
                return -EFAULT;
        if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
                return -EDOM;

        if (tv.tv_sec < 0) {
                static int warned __read_mostly;

                *timeo_p = 0;
                if (warned < 10 && net_ratelimit()) {
                        warned++;
                        printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
                               "tries to set negative timeout\n",
                               current->comm, task_pid_nr(current));
                }
                return 0;
        }
        *timeo_p = MAX_SCHEDULE_TIMEOUT;
        if (tv.tv_sec == 0 && tv.tv_usec == 0)
                return 0;
        if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
                *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
        return 0;
}
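
/*
 * Example (hypothetical userspace code, not part of this file): a
 * 2.5 second receive timeout arrives at sock_set_timeout() above as a
 * struct timeval and is rounded up to jiffies:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt(SO_RCVTIMEO)");
 *
 * A timeout of {0, 0} means "wait forever" (MAX_SCHEDULE_TIMEOUT).
 */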

static void sock_warn_obsolete_bsdism(const char *name)
{
        static int warned;
        static char warncomm[TASK_COMM_LEN];
        if (strcmp(warncomm, current->comm) && warned < 5) {
                strcpy(warncomm, current->comm);
                printk(KERN_WARNING "process `%s' is using obsolete "
                       "%s SO_BSDCOMPAT\n", warncomm, name);
                warned++;
        }
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
        if (sk->sk_flags & flags) {
                sk->sk_flags &= ~flags;
                if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
                        net_disable_timestamp();
        }
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;
        int skb_len;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;

        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
        }

        err = sk_filter(sk, skb);
        if (err)
                return err;

        if (!sk_rmem_schedule(sk, skb->truesize)) {
                atomic_inc(&sk->sk_drops);
                return -ENOBUFS;
        }

        skb->dev = NULL;
        skb_set_owner_r(skb, sk);

        /* Cache the SKB length before we tack it onto the receive
         * queue. Once it is added it no longer belongs to us and
         * may be freed by other threads of control pulling packets
         * from the queue.
         */
        skb_len = skb->len;

        /* we escape from rcu protected region, make sure we dont leak
         * a norefcounted dst
         */
        skb_dst_force(skb);

        spin_lock_irqsave(&list->lock, flags);
        skb->dropcount = atomic_read(&sk->sk_drops);
        __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb_len);
        return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
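
/*
 * Sketch (hypothetical protocol code, not part of this file) of how a
 * delivery path typically uses sock_queue_rcv_skb() once it has looked
 * up the destination sock; on error the caller still owns the skb:
 *
 *	err = sock_queue_rcv_skb(sk, skb);
 *	if (err < 0) {
 *		// -ENOMEM: receive queue full, -ENOBUFS: no rmem left,
 *		// or a socket-filter verdict; drop it ourselves.
 *		kfree_skb(skb);
 *	}
 */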

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
        int rc = NET_RX_SUCCESS;

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        if (sk_rcvqueues_full(sk, skb)) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }
        if (nested)
                bh_lock_sock_nested(sk);
        else
                bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                /*
                 * trylock + unlock semantics:
                 */
                mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

                rc = sk_backlog_rcv(sk, skb);

                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
        } else if (sk_add_backlog(sk, skb)) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }

        bh_unlock_sock(sk);
out:
        sock_put(sk);
        return rc;
discard_and_relse:
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
        sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = __sk_dst_get(sk);

        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                sk_tx_queue_clear(sk);
                RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
                dst_release(dst);
                return NULL;
        }

        return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = sk_dst_get(sk);

        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                sk_dst_reset(sk);
                dst_release(dst);
                return NULL;
        }

        return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
        int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
        struct net *net = sock_net(sk);
        char devname[IFNAMSIZ];
        int index;

        /* Sorry... */
        ret = -EPERM;
        if (!capable(CAP_NET_RAW))
                goto out;

        ret = -EINVAL;
        if (optlen < 0)
                goto out;

        /* Bind this socket to a particular device like "eth0",
         * as specified in the passed interface name. If the
         * name is "" or the option length is zero the socket
         * is not bound.
         */
        if (optlen > IFNAMSIZ - 1)
                optlen = IFNAMSIZ - 1;
        memset(devname, 0, sizeof(devname));

        ret = -EFAULT;
        if (copy_from_user(devname, optval, optlen))
                goto out;

        index = 0;
        if (devname[0] != '\0') {
                struct net_device *dev;

                rcu_read_lock();
                dev = dev_get_by_name_rcu(net, devname);
                if (dev)
                        index = dev->ifindex;
                rcu_read_unlock();
                ret = -ENODEV;
                if (!dev)
                        goto out;
        }

        lock_sock(sk);
        sk->sk_bound_dev_if = index;
        sk_dst_reset(sk);
        release_sock(sk);

        ret = 0;

out:
#endif

        return ret;
}
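
/*
 * Example (hypothetical userspace code, not part of this file): binding
 * a socket to one interface; note the CAP_NET_RAW check above, so this
 * normally requires privilege. An empty name ("") unbinds:
 *
 *	const char ifname[] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, sizeof(ifname)) < 0)
 *		perror("setsockopt(SO_BINDTODEVICE)");
 */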

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
        if (valbool)
                sock_set_flag(sk, bit);
        else
                sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        int val;
        int valbool;
        struct linger ling;
        int ret = 0;

        /*
         * Options without arguments
         */

        if (optname == SO_BINDTODEVICE)
                return sock_bindtodevice(sk, optval, optlen);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        valbool = val ? 1 : 0;

        lock_sock(sk);

        switch (optname) {
        case SO_DEBUG:
                if (val && !capable(CAP_NET_ADMIN))
                        ret = -EACCES;
                else
                        sock_valbool_flag(sk, SOCK_DBG, valbool);
                break;
        case SO_REUSEADDR:
                sk->sk_reuse = valbool;
                break;
        case SO_TYPE:
        case SO_PROTOCOL:
        case SO_DOMAIN:
        case SO_ERROR:
                ret = -ENOPROTOOPT;
                break;
        case SO_DONTROUTE:
                sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
                break;
        case SO_BROADCAST:
                sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
                break;
        case SO_SNDBUF:
                /* Don't error on this; BSD doesn't and, if you think
                   about it, this is right. Otherwise apps have to
                   play 'guess the biggest size' games. RCVBUF/SNDBUF
                   are treated in BSD as hints */

                if (val > sysctl_wmem_max)
                        val = sysctl_wmem_max;
set_sndbuf:
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                if ((val * 2) < SOCK_MIN_SNDBUF)
                        sk->sk_sndbuf = SOCK_MIN_SNDBUF;
                else
                        sk->sk_sndbuf = val * 2;

                /*
                 * Wake up sending tasks if we upped the value.
                 */
                sk->sk_write_space(sk);
                break;

        case SO_SNDBUFFORCE:
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        break;
                }
                goto set_sndbuf;

        case SO_RCVBUF:
                /* Don't error on this; BSD doesn't and, if you think
                   about it, this is right. Otherwise apps have to
                   play 'guess the biggest size' games. RCVBUF/SNDBUF
                   are treated in BSD as hints */

                if (val > sysctl_rmem_max)
                        val = sysctl_rmem_max;
set_rcvbuf:
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                /*
                 * We double it on the way in to account for
                 * "struct sk_buff" etc. overhead. Applications
                 * assume that the SO_RCVBUF setting they make will
                 * allow that much actual data to be received on that
                 * socket.
                 *
                 * Applications are unaware that "struct sk_buff" and
                 * other overheads allocate from the receive buffer
                 * during socket buffer allocation.
                 *
                 * And after considering the possible alternatives,
                 * returning the value we actually used in getsockopt
                 * is the most desirable behavior.
                 */
                if ((val * 2) < SOCK_MIN_RCVBUF)
                        sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
                else
                        sk->sk_rcvbuf = val * 2;
                break;

        case SO_RCVBUFFORCE:
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        break;
                }
                goto set_rcvbuf;

        case SO_KEEPALIVE:
#ifdef CONFIG_INET
                if (sk->sk_protocol == IPPROTO_TCP)
                        tcp_set_keepalive(sk, valbool);
#endif
                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
                break;

        case SO_OOBINLINE:
                sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
                break;

        case SO_NO_CHECK:
                sk->sk_no_check = valbool;
                break;

        case SO_PRIORITY:
                if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
                        sk->sk_priority = val;
                else
                        ret = -EPERM;
                break;

        case SO_LINGER:
                if (optlen < sizeof(ling)) {
                        ret = -EINVAL;	/* 1003.1g */
                        break;
                }
                if (copy_from_user(&ling, optval, sizeof(ling))) {
                        ret = -EFAULT;
                        break;
                }
                if (!ling.l_onoff)
                        sock_reset_flag(sk, SOCK_LINGER);
                else {
#if (BITS_PER_LONG == 32)
                        if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
                                sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
                        else
#endif
                                sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
                        sock_set_flag(sk, SOCK_LINGER);
                }
                break;

        case SO_BSDCOMPAT:
                sock_warn_obsolete_bsdism("setsockopt");
                break;

        case SO_PASSCRED:
                if (valbool)
                        set_bit(SOCK_PASSCRED, &sock->flags);
                else
                        clear_bit(SOCK_PASSCRED, &sock->flags);
                break;

        case SO_TIMESTAMP:
        case SO_TIMESTAMPNS:
                if (valbool) {
                        if (optname == SO_TIMESTAMP)
                                sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
                        else
                                sock_set_flag(sk, SOCK_RCVTSTAMPNS);
                        sock_set_flag(sk, SOCK_RCVTSTAMP);
                        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
                } else {
                        sock_reset_flag(sk, SOCK_RCVTSTAMP);
                        sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
                }
                break;

        case SO_TIMESTAMPING:
                if (val & ~SOF_TIMESTAMPING_MASK) {
                        ret = -EINVAL;
                        break;
                }
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
                                  val & SOF_TIMESTAMPING_TX_HARDWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
                                  val & SOF_TIMESTAMPING_TX_SOFTWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
                                  val & SOF_TIMESTAMPING_RX_HARDWARE);
                if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
                        sock_enable_timestamp(sk,
                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
                else
                        sock_disable_timestamp(sk,
                                               (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
                                  val & SOF_TIMESTAMPING_SOFTWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
                                  val & SOF_TIMESTAMPING_SYS_HARDWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
                                  val & SOF_TIMESTAMPING_RAW_HARDWARE);
                break;

        case SO_RCVLOWAT:
                if (val < 0)
                        val = INT_MAX;
                sk->sk_rcvlowat = val ? : 1;
                break;

        case SO_RCVTIMEO:
                ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
                break;

        case SO_SNDTIMEO:
                ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
                break;

        case SO_ATTACH_FILTER:
                ret = -EINVAL;
                if (optlen == sizeof(struct sock_fprog)) {
                        struct sock_fprog fprog;

                        ret = -EFAULT;
                        if (copy_from_user(&fprog, optval, sizeof(fprog)))
                                break;

                        ret = sk_attach_filter(&fprog, sk);
                }
                break;

        case SO_DETACH_FILTER:
                ret = sk_detach_filter(sk);
                break;

        case SO_PASSSEC:
                if (valbool)
                        set_bit(SOCK_PASSSEC, &sock->flags);
                else
                        clear_bit(SOCK_PASSSEC, &sock->flags);
                break;
        case SO_MARK:
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        sk->sk_mark = val;
                break;

                /* We implement the SO_SNDLOWAT etc to
                   not be settable (1003.1g 5.3) */
        case SO_RXQ_OVFL:
                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
                break;

        case SO_WIFI_STATUS:
                sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
                break;

        case SO_PEEK_OFF:
                if (sock->ops->set_peek_off)
                        sock->ops->set_peek_off(sk, val);
                else
                        ret = -EOPNOTSUPP;
                break;
        default:
                ret = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);
        return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
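
/*
 * Example (hypothetical userspace code, not part of this file)
 * illustrating the doubling behaviour documented above: the kernel
 * stores twice the requested value to cover struct sk_buff overhead,
 * and getsockopt() reports the doubled value:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out == 131072, assuming 65536 <= sysctl_rmem_max
 */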

void cred_to_ucred(struct pid *pid, const struct cred *cred,
                   struct ucred *ucred)
{
        ucred->pid = pid_vnr(pid);
        ucred->uid = ucred->gid = -1;
        if (cred) {
                struct user_namespace *current_ns = current_user_ns();

                ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
                ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
        }
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        union {
                int val;
                struct linger ling;
                struct timeval tm;
        } v;

        int lv = sizeof(int);
        int len;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        memset(&v, 0, sizeof(v));

        switch (optname) {
        case SO_DEBUG:
                v.val = sock_flag(sk, SOCK_DBG);
                break;

        case SO_DONTROUTE:
                v.val = sock_flag(sk, SOCK_LOCALROUTE);
                break;

        case SO_BROADCAST:
                v.val = !!sock_flag(sk, SOCK_BROADCAST);
                break;

        case SO_SNDBUF:
                v.val = sk->sk_sndbuf;
                break;

        case SO_RCVBUF:
                v.val = sk->sk_rcvbuf;
                break;

        case SO_REUSEADDR:
                v.val = sk->sk_reuse;
                break;

        case SO_KEEPALIVE:
                v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
                break;

        case SO_TYPE:
                v.val = sk->sk_type;
                break;

        case SO_PROTOCOL:
                v.val = sk->sk_protocol;
                break;

        case SO_DOMAIN:
                v.val = sk->sk_family;
                break;

        case SO_ERROR:
                v.val = -sock_error(sk);
                if (v.val == 0)
                        v.val = xchg(&sk->sk_err_soft, 0);
                break;

        case SO_OOBINLINE:
                v.val = !!sock_flag(sk, SOCK_URGINLINE);
                break;

        case SO_NO_CHECK:
                v.val = sk->sk_no_check;
                break;

        case SO_PRIORITY:
                v.val = sk->sk_priority;
                break;

        case SO_LINGER:
                lv = sizeof(v.ling);
                v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
                v.ling.l_linger = sk->sk_lingertime / HZ;
                break;

        case SO_BSDCOMPAT:
                sock_warn_obsolete_bsdism("getsockopt");
                break;

        case SO_TIMESTAMP:
                v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
                        !sock_flag(sk, SOCK_RCVTSTAMPNS);
                break;

        case SO_TIMESTAMPNS:
                v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
                break;

        case SO_TIMESTAMPING:
                v.val = 0;
                if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
                        v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
                        v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
                        v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
                        v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
                        v.val |= SOF_TIMESTAMPING_SOFTWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
                        v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
                        v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
                break;

        case SO_RCVTIMEO:
                lv = sizeof(struct timeval);
                if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
                        v.tm.tv_sec = 0;
                        v.tm.tv_usec = 0;
                } else {
                        v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
                        v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
                }
                break;

        case SO_SNDTIMEO:
                lv = sizeof(struct timeval);
                if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
                        v.tm.tv_sec = 0;
                        v.tm.tv_usec = 0;
                } else {
                        v.tm.tv_sec = sk->sk_sndtimeo / HZ;
                        v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
                }
                break;

        case SO_RCVLOWAT:
                v.val = sk->sk_rcvlowat;
                break;

        case SO_SNDLOWAT:
                v.val = 1;
                break;

        case SO_PASSCRED:
                v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
                break;

        case SO_PEERCRED:
        {
                struct ucred peercred;
                if (len > sizeof(peercred))
                        len = sizeof(peercred);
                cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
                if (copy_to_user(optval, &peercred, len))
                        return -EFAULT;
                goto lenout;
        }

        case SO_PEERNAME:
        {
                char address[128];

                if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
                        return -ENOTCONN;
                if (lv < len)
                        return -EINVAL;
                if (copy_to_user(optval, address, len))
                        return -EFAULT;
                goto lenout;
        }

        /* Dubious BSD thing... Probably nobody even uses it, but
         * the UNIX standard wants it for whatever reason... -DaveM
         */
        case SO_ACCEPTCONN:
                v.val = sk->sk_state == TCP_LISTEN;
                break;

        case SO_PASSSEC:
                v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
                break;

        case SO_PEERSEC:
                return security_socket_getpeersec_stream(sock, optval, optlen, len);

        case SO_MARK:
                v.val = sk->sk_mark;
                break;

        case SO_RXQ_OVFL:
                v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
                break;

        case SO_WIFI_STATUS:
                v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
                break;

        case SO_PEEK_OFF:
                if (!sock->ops->set_peek_off)
                        return -EOPNOTSUPP;

                v.val = sk->sk_peek_off;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (len > lv)
                len = lv;
        if (copy_to_user(optval, &v, len))
                return -EFAULT;
lenout:
        if (put_user(len, optlen))
                return -EFAULT;
        return 0;
}
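
/*
 * Example (hypothetical userspace code, not part of this file): reading
 * the peer's credentials on a connected AF_UNIX socket; the kernel side
 * is the SO_PEERCRED case above, filled in by cred_to_ucred():
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n", peer.pid, peer.uid, peer.gid);
 */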

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
        sock_lock_init_class_and_name(sk,
                        af_family_slock_key_strings[sk->sk_family],
                        af_family_slock_keys + sk->sk_family,
                        af_family_key_strings[sk->sk_family],
                        af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
        void *sptr = nsk->sk_security;
#endif
        memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

        memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
               osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
        nsk->sk_security = sptr;
        security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
        if (offsetof(struct sock, sk_node.next) != 0)
                memset(sk, 0, offsetof(struct sock, sk_node.next));
        memset(&sk->sk_node.pprev, 0,
               size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
        unsigned long nulls1, nulls2;

        nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
        nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
        if (nulls1 > nulls2)
                swap(nulls1, nulls2);

        if (nulls1 != 0)
                memset((char *)sk, 0, nulls1);
        memset((char *)sk + nulls1 + sizeof(void *), 0,
               nulls2 - nulls1 - sizeof(void *));
        memset((char *)sk + nulls2 + sizeof(void *), 0,
               size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                                  int family)
{
        struct sock *sk;
        struct kmem_cache *slab;

        slab = prot->slab;
        if (slab != NULL) {
                sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
                if (!sk)
                        return sk;
                if (priority & __GFP_ZERO) {
                        if (prot->clear_sk)
                                prot->clear_sk(sk, prot->obj_size);
                        else
                                sk_prot_clear_nulls(sk, prot->obj_size);
                }
        } else
                sk = kmalloc(prot->obj_size, priority);

        if (sk != NULL) {
                kmemcheck_annotate_bitfield(sk, flags);

                if (security_sk_alloc(sk, family, priority))
                        goto out_free;

                if (!try_module_get(prot->owner))
                        goto out_free_sec;
                sk_tx_queue_clear(sk);
        }

        return sk;

out_free_sec:
        security_sk_free(sk);
out_free:
        if (slab != NULL)
                kmem_cache_free(slab, sk);
        else
                kfree(sk);
        return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
        struct kmem_cache *slab;
        struct module *owner;

        owner = prot->owner;
        slab = prot->slab;

        security_sk_free(sk);
        if (slab != NULL)
                kmem_cache_free(slab, sk);
        else
                kfree(sk);
        module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
        u32 classid;

        rcu_read_lock();  /* doing current task, which cannot vanish. */
        classid = task_cls_classid(current);
        rcu_read_unlock();
        if (classid && classid != sk->sk_classid)
                sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk)
{
        if (in_interrupt())
                return;

        sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                      struct proto *prot)
{
        struct sock *sk;

        sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
        if (sk) {
                sk->sk_family = family;
                /*
                 * See comment in struct sock definition to understand
                 * why we need sk_prot_creator -acme
                 */
                sk->sk_prot = sk->sk_prot_creator = prot;
                sock_lock_init(sk);
                sock_net_set(sk, get_net(net));
                atomic_set(&sk->sk_wmem_alloc, 1);

                sock_update_classid(sk);
                sock_update_netprioidx(sk);
        }

        return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
        struct sk_filter *filter;

        if (sk->sk_destruct)
                sk->sk_destruct(sk);

        filter = rcu_dereference_check(sk->sk_filter,
                                       atomic_read(&sk->sk_wmem_alloc) == 0);
        if (filter) {
                sk_filter_uncharge(sk, filter);
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }

        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

        if (atomic_read(&sk->sk_omem_alloc))
                printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
                       __func__, atomic_read(&sk->sk_omem_alloc));

        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
        put_net(sock_net(sk));
        sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
        /*
         * We subtract one from sk_wmem_alloc and can know if
         * some packets are still in some tx queue.
         * If not null, sock_wfree() will call __sk_free(sk) later
         */
        if (atomic_dec_and_test(&sk->sk_wmem_alloc))
                __sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
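
/*
 * Sketch (hypothetical, not part of this file) of the usual way a
 * protocol family's ->create() handler pairs sk_alloc() with
 * sock_init_data() (defined later in this file); the "foo" names are
 * placeholders:
 *
 *	sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	// protocol-private init here; sk_free() undoes sk_alloc()
 */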

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
        if (sk == NULL || sk->sk_socket == NULL)
                return;

        sock_hold(sk);
        sock_release(sk->sk_socket);
        release_net(sock_net(sk));
        sock_net_set(sk, get_net(&init_net));
        sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                sock_update_memcg(newsk);
}

/**
 * sk_clone_lock - clone a socket, and lock its clone
 * @sk: the socket to clone
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
        struct sock *newsk;

        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
        if (newsk != NULL) {
                struct sk_filter *filter;

                sock_copy(newsk, sk);

                /* SANITY */
                get_net(sock_net(newsk));
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
                newsk->sk_backlog.len = 0;

                atomic_set(&newsk->sk_rmem_alloc, 0);
                /*
                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
                 */
                atomic_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
                skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

                spin_lock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
                lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                           af_callback_keys + newsk->sk_family,
                                           af_family_clock_key_strings[newsk->sk_family]);

                newsk->sk_dst_cache = NULL;
                newsk->sk_wmem_queued = 0;
                newsk->sk_forward_alloc = 0;
                newsk->sk_send_head = NULL;
                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);

                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
                        sk_filter_charge(newsk, filter);

                if (unlikely(xfrm_sk_clone_policy(newsk))) {
                        /* It is still a raw copy of the parent, so invalidate
                         * the destructor and do a plain sk_free() */
                        newsk->sk_destruct = NULL;
                        bh_unlock_sock(newsk);
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
                }

                newsk->sk_err = 0;
                newsk->sk_priority = 0;
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
                 */
                smp_wmb();
                atomic_set(&newsk->sk_refcnt, 2);

                /*
                 * Increment the counter in the same struct proto as the master
                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
                 * is the same as sk->sk_prot->socks, as this field was copied
                 * with memcpy).
                 *
                 * This _changes_ the previous behaviour, where
                 * tcp_create_openreq_child always was incrementing the
                 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
                 * to be taken into account in all callers. -acme
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
                newsk->sk_wq = NULL;

                sk_update_clone(sk, newsk);

                if (newsk->sk_prot->sockets_allocated)
                        sk_sockets_allocated_inc(newsk);

                if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
out:
        return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
        __sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
        sk->sk_route_caps &= ~sk->sk_route_nocaps;
        if (sk_can_gso(sk)) {
                if (dst->header_len) {
                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
                }
        }
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
        if (totalram_pages <= 4096) {
                sysctl_wmem_max = 32767;
                sysctl_rmem_max = 32767;
                sysctl_wmem_default = 32767;
                sysctl_rmem_default = 32767;
        } else if (totalram_pages >= 131072) {
                sysctl_wmem_max = 131071;
                sysctl_rmem_max = 131071;
        }
}

/*
 * Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
                /*
                 * Keep a reference on sk_wmem_alloc, this will be released
                 * after sk_write_space() call
                 */
                atomic_sub(len - 1, &sk->sk_wmem_alloc);
                sk->sk_write_space(sk);
                len = 1;
        }
        /*
         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
         * could not do because of in-flight packets
         */
        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        atomic_sub(len, &sk->sk_rmem_alloc);
        sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
        int uid;

        read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
        unsigned long ino;

        read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
        read_unlock_bh(&sk->sk_callback_lock);
        return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);
                if (skb) {
                        skb_set_owner_r(skb, sk);
                        return skb;
                }
        }
        return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
        if ((unsigned)size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
                void *mem;
                /* First do the add, to avoid the race if kmalloc
                 * might sleep.
                 */
                atomic_add(size, &sk->sk_omem_alloc);
                mem = kmalloc(size, priority);
                if (mem)
                        return mem;
                atomic_sub(size, &sk->sk_omem_alloc);
        }
        return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
        kfree(mem);
        atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
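
/*
 * Sketch (hypothetical, not part of this file): sock_kmalloc() charges
 * sk_omem_alloc, so the matching sock_kfree_s() must be passed the same
 * size to keep the accounting balanced:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;	// allocation failed or over optmem limit
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */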

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        for (;;) {
                if (!timeo)
                        break;
                if (signal_pending(current))
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
                if (sk->sk_err)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}


/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode)
{
        struct sk_buff *skb;
        gfp_t gfp_mask;
        long timeo;
        int err;

        gfp_mask = sk->sk_allocation;
        if (gfp_mask & __GFP_WAIT)
                gfp_mask |= __GFP_REPEAT;

        timeo = sock_sndtimeo(sk, noblock);
        while (1) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;

                err = -EPIPE;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;

                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                        skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
                                int npages;
                                int i;

                                /* No pages, we're done... */
                                if (!data_len)
                                        break;

                                npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                                skb->truesize += data_len;
                                skb_shinfo(skb)->nr_frags = npages;
                                for (i = 0; i < npages; i++) {
                                        struct page *page;

                                        page = alloc_pages(sk->sk_allocation, 0);
                                        if (!page) {
                                                err = -ENOBUFS;
                                                skb_shinfo(skb)->nr_frags = i;
                                                kfree_skb(skb);
                                                goto failure;
                                        }

                                        __skb_fill_page_desc(skb, i,
                                                        page, 0,
                                                        (data_len >= PAGE_SIZE ?
                                                         PAGE_SIZE :
                                                         data_len));
                                        data_len -= PAGE_SIZE;
                                }

                                /* Full success... */
                                break;
                        }
                        err = -ENOBUFS;
                        goto failure;
                }
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = -EAGAIN;
                if (!timeo)
                        goto failure;
                if (signal_pending(current))
                        goto interrupted;
                timeo = sock_wait_for_wmem(sk, timeo);
        }

        skb_set_owner_w(skb, sk);
        return skb;

interrupted:
        err = sock_intr_errno(timeo);
failure:
        *errcode = err;
        return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
{
        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
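
/*
 * Sketch (hypothetical, not part of this file) of a datagram sendmsg()
 * path using sock_alloc_send_skb(); it blocks up to sk_sndtimeo unless
 * MSG_DONTWAIT is set, and reports failure through *errcode:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;	// e.g. -EAGAIN, -EPIPE, or a signal
 */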

static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
                if (!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        struct sk_buff *skb = sk->sk_backlog.head;

        do {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
                bh_unlock_sock(sk);

                do {
                        struct sk_buff *next = skb->next;

                        WARN_ON_ONCE(skb_dst_is_noref(skb));
                        skb->next = NULL;
                        sk_backlog_rcv(sk, skb);

                        /*
                         * We are in process context here with softirqs
                         * disabled, use cond_resched_softirq() to preempt.
                         * This is safe to do because we've taken the backlog
                         * queue private:
                         */
                        cond_resched_softirq();

                        skb = next;
                } while (skb != NULL);

                bh_lock_sock(sk);
        } while ((skb = sk->sk_backlog.head) != NULL);

        /*
         * Doing the zeroing here guarantees we cannot loop forever
         * while a wild producer attempts to flood us.
         */
        sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
        int rc;
        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        finish_wait(sk_sleep(sk), &wait);
        return rc;
}
EXPORT_SYMBOL(sk_wait_data);
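
/*
 * Sketch (hypothetical, simplified, not part of this file) of the
 * classic recvmsg() wait loop built on sk_wait_data(); real callers
 * run this under lock_sock() and also check sk_err, shutdown state
 * and pending signals:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		sk_wait_data(sk, &timeo);
 *	}
 */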

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
        struct proto *prot = sk->sk_prot;
        int amt = sk_mem_pages(size);
        long allocated;
        int parent_status = UNDER_LIMIT;

        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

        allocated = sk_memory_allocated_add(sk, amt, &parent_status);

        /* Under limit. */
        if (parent_status == UNDER_LIMIT &&
            allocated <= sk_prot_mem_limits(sk, 0)) {
                sk_leave_memory_pressure(sk);
                return 1;
        }

        /* Under pressure. (we or our parents) */
        if ((parent_status > SOFT_LIMIT) ||
            allocated > sk_prot_mem_limits(sk, 1))
                sk_enter_memory_pressure(sk);

        /* Over hard limit (we or our parents) */
        if ((parent_status == OVER_LIMIT) ||
            (allocated > sk_prot_mem_limits(sk, 2)))
                goto suppress_allocation;

        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;

        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
                                return 1;
                } else if (atomic_read(&sk->sk_wmem_alloc) <
                           prot->sysctl_wmem[0])
                        return 1;
        }

        if (sk_has_memory_pressure(sk)) {
                int alloc;

                if (!sk_under_memory_pressure(sk))
                        return 1;
                alloc = sk_sockets_allocated_read_positive(sk);
                if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
                        return 1;
        }

suppress_allocation:

        if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
                sk_stream_moderate_sndbuf(sk);

                /* Fail only if socket is _under_ its sndbuf.
                 * In this case we cannot block, so that we have to fail.
                 */
                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
                        return 1;
        }

        trace_sock_exceed_buf_limit(sk, prot, allocated);

        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

        sk_memory_allocated_sub(sk, amt);

        return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
        sk_memory_allocated_sub(sk,
                                sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

        if (sk_under_memory_pressure(sk) &&
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
                sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);
        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
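
/*
 * Sketch (hypothetical, not part of this file): a proto_ops table for a
 * family that does not support connection-oriented calls can simply
 * point at these stubs ("foo" names are placeholders):
 *
 *	static const struct proto_ops foo_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		...
 *		.connect	= sock_no_connect,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *	};
 */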

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                        POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
        kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (timer_pending(timer) && del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
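
/*
 * Sketch (hypothetical, not part of this file): sk_reset_timer() takes
 * a socket reference when it arms a previously idle timer, so the timer
 * handler (or sk_stop_timer()) is responsible for dropping it:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	static void foo_timer(unsigned long data)	// hypothetical
 *	{
 *		struct sock *sk = (struct sock *)data;
 *		// do the timed work ...
 *		sock_put(sk);	// balance the sock_hold() above
 *	}
 */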

void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
        skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_wq = sock->wq;
                sock->sk = sk;
        } else
                sk->sk_wq = NULL;

        spin_lock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                                   af_callback_keys + sk->sk_family,
                                   af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_sndmsg_page = NULL;
        sk->sk_sndmsg_off = 0;
        sk->sk_peek_off = -1;

        sk->sk_peer_pid = NULL;
        sk->sk_peer_cred = NULL;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);

        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        /*
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);
        sk->sk_lock.owned = 0;
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
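
/*
 * Sketch (hypothetical, not part of this file) of the standard process
 * context locking pattern: while the socket is "owned", softirq input
 * is diverted to the backlog, which release_sock() then replays:
 *
 *	lock_sock(sk);
 *	// mutate socket state safely here
 *	release_sock(sk);	// runs __release_sock() if the backlog grew
 */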
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: the fast path returns with BH still disabled
		 * and the slock held.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags which require net
		 * timestamping, but timestamping might already have been
		 * enabled because of the other one.
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}
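
/*
 * Illustrative sketch, not part of this file: lock_sock_fast() pairs with
 * unlock_sock_fast() from include/net/sock.h, which takes the bool
 * returned here so it knows whether to do a full release_sock() or just
 * drop the spinlock. The helper below is hypothetical.
 */
#if 0
static void example_drain_error_queue(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */
	skb_queue_purge(&sk->sk_error_queue);

	unlock_sock_fast(sk, slow);
}
#endif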

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
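
/*
 * Illustrative sketch, not part of this file: families whose per-protocol
 * handlers live in sk->sk_prot can route the generic socket calls through
 * the sock_common_*() helpers above from their proto_ops, as the inet
 * families do. Fragment shown out of context:
 */
#if 0
	.setsockopt	= sock_common_setsockopt,	/* -> sk_prot->setsockopt */
	.getsockopt	= sock_common_getsockopt,	/* -> sk_prot->getsockopt */
	.recvmsg	= sock_common_recvmsg,		/* -> sk_prot->recvmsg */
#endif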

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes no
	 * longer have access to the socket, but the network stack still
	 * does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight, because some CPU
	 * runs the receiver and did the hash-table lookup before we unhashed
	 * the socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also, we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
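
/*
 * Illustrative sketch, not part of this file: protocols bump these
 * counters from their ->hash()/->unhash() callbacks so that
 * /proc/net/protocols can report how many sockets of each type exist.
 * The example_*() functions below are hypothetical.
 */
#if 0
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif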

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
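
/*
 * Illustrative sketch, not part of this file: a protocol module registers
 * its struct proto once at init time and unregisters it on exit. The
 * "example_proto" definition is hypothetical; alloc_slab=1 asks
 * proto_register() to create the per-protocol sock slab cache.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* usually a larger wrapper */
};

static int __init example_init(void)
{
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif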

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
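
/*
 * Illustrative sketch, not part of this file: the seq handlers above back
 * /proc/net/protocols. The output shape is roughly the following; the TCP
 * row values are made up for illustration:
 *
 *   protocol  size sockets  memory press maxhdr slab module  cl co di ac ...
 *   TCP       1680      4       0 no        320 yes  kernel   y  y  y  y ...
 */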

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */