sock.c revision b4b9e3558508980fc0cd161a545ffb55a1f13ee9
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Generic socket support routines. Memory allocators, socket lock/release 7 * handler for protocols to use and generic option handler. 8 * 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Alan Cox, <A.Cox@swansea.ac.uk> 14 * 15 * Fixes: 16 * Alan Cox : Numerous verify_area() problems 17 * Alan Cox : Connecting on a connecting socket 18 * now returns an error for tcp. 19 * Alan Cox : sock->protocol is set correctly. 20 * and is not sometimes left as 0. 21 * Alan Cox : connect handles icmp errors on a 22 * connect properly. Unfortunately there 23 * is a restart syscall nasty there. I 24 * can't match BSD without hacking the C 25 * library. Ideas urgently sought! 26 * Alan Cox : Disallow bind() to addresses that are 27 * not ours - especially broadcast ones!! 28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) 29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, 30 * instead they leave that for the DESTROY timer. 31 * Alan Cox : Clean up error flag in accept 32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 33 * was buggy. Put a remove_sock() in the handler 34 * for memory when we hit 0. Also altered the timer 35 * code. The ACK stuff can wait and needs major 36 * TCP layer surgery. 37 * Alan Cox : Fixed TCP ack bug, removed remove sock 38 * and fixed timer/inet_bh race. 39 * Alan Cox : Added zapped flag for TCP 40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code 41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb 42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources 43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. 44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 45 * Rick Sladkey : Relaxed UDP rules for matching packets. 46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support 47 * Pauline Middelink : identd support 48 * Alan Cox : Fixed connect() taking signals I think. 49 * Alan Cox : SO_LINGER supported 50 * Alan Cox : Error reporting fixes 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 53 * Alan Cox : Split socket option code 54 * Alan Cox : Callbacks 55 * Alan Cox : Nagle flag for Charles & Johannes stuff 56 * Alex : Removed restriction on inet fioctl 57 * Alan Cox : Splitting INET from NET core 58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() 59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code 60 * Alan Cox : Split IP from generic code 61 * Alan Cox : New kfree_skbmem() 62 * Alan Cox : Make SO_DEBUG superuser only. 63 * Alan Cox : Allow anyone to clear SO_DEBUG 64 * (compatibility fix) 65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. 66 * Alan Cox : Allocator for a socket is settable. 67 * Alan Cox : SO_ERROR includes soft errors. 68 * Alan Cox : Allow NULL arguments on some SO_ opts 69 * Alan Cox : Generic socket allocation to make hooks 70 * easier (suggested by Craig Metz). 71 * Michael Pall : SO_ERROR returns positive errno again 72 * Steve Whitehouse: Added default destructor to free 73 * protocol private data. 74 * Steve Whitehouse: Added various other default routines 75 * common to several socket families. 76 * Chris Evans : Call suser() check last on F_SETOWN 77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() 79 * Andi Kleen : Fix write_space callback 80 * Chris Evans : Security fixes - signedness again 81 * Arnaldo C. Melo : cleanups, use skb_queue_purge 82 * 83 * To Fix: 84 * 85 * 86 * This program is free software; you can redistribute it and/or 87 * modify it under the terms of the GNU General Public License 88 * as published by the Free Software Foundation; either version 89 * 2 of the License, or (at your option) any later version. 90 */ 91 92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 93 94#include <linux/capability.h> 95#include <linux/errno.h> 96#include <linux/types.h> 97#include <linux/socket.h> 98#include <linux/in.h> 99#include <linux/kernel.h> 100#include <linux/module.h> 101#include <linux/proc_fs.h> 102#include <linux/seq_file.h> 103#include <linux/sched.h> 104#include <linux/timer.h> 105#include <linux/string.h> 106#include <linux/sockios.h> 107#include <linux/net.h> 108#include <linux/mm.h> 109#include <linux/slab.h> 110#include <linux/interrupt.h> 111#include <linux/poll.h> 112#include <linux/tcp.h> 113#include <linux/init.h> 114#include <linux/highmem.h> 115#include <linux/user_namespace.h> 116#include <linux/static_key.h> 117#include <linux/memcontrol.h> 118#include <linux/prefetch.h> 119 120#include <asm/uaccess.h> 121 122#include <linux/netdevice.h> 123#include <net/protocol.h> 124#include <linux/skbuff.h> 125#include <net/net_namespace.h> 126#include <net/request_sock.h> 127#include <net/sock.h> 128#include <linux/net_tstamp.h> 129#include <net/xfrm.h> 130#include <linux/ipsec.h> 131#include <net/cls_cgroup.h> 132#include <net/netprio_cgroup.h> 133 134#include <linux/filter.h> 135 136#include <trace/events/sock.h> 137 138#ifdef CONFIG_INET 139#include <net/tcp.h> 140#endif 141 142static DEFINE_MUTEX(proto_list_mutex); 143static LIST_HEAD(proto_list); 144 145#ifdef CONFIG_MEMCG_KMEM 146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 147{ 148 struct proto *proto; 149 int ret = 0; 150 151 mutex_lock(&proto_list_mutex); 152 list_for_each_entry(proto, &proto_list, node) { 153 if (proto->init_cgroup) { 154 ret = proto->init_cgroup(memcg, ss); 155 if (ret) 156 goto out; 157 } 158 } 159 160 mutex_unlock(&proto_list_mutex); 161 return ret; 162out: 163 list_for_each_entry_continue_reverse(proto, &proto_list, node) 164 if (proto->destroy_cgroup) 165 proto->destroy_cgroup(memcg); 166 mutex_unlock(&proto_list_mutex); 167 return ret; 168} 169 170void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) 171{ 172 struct proto *proto; 173 174 mutex_lock(&proto_list_mutex); 175 list_for_each_entry_reverse(proto, &proto_list, node) 176 if (proto->destroy_cgroup) 177 proto->destroy_cgroup(memcg); 178 mutex_unlock(&proto_list_mutex); 179} 180#endif 181 182/* 183 * Each address family might have different locking rules, so we have 184 * one slock key per address family: 185 */ 186static struct lock_class_key af_family_keys[AF_MAX]; 187static struct lock_class_key af_family_slock_keys[AF_MAX]; 188 189struct static_key memcg_socket_limit_enabled; 190EXPORT_SYMBOL(memcg_socket_limit_enabled); 191 192/* 193 * Make lock validator output more readable. (we pre-construct these 194 * strings build-time, so that runtime initialization of socket 195 * locks is fast): 196 */ 197static const char *const af_family_key_strings[AF_MAX+1] = { 198 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 199 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", 200 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , 201 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , 202 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , 203 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , 204 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , 205 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , 206 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , 207 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 208 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 209 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 210 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , 211 "sk_lock-AF_NFC" , "sk_lock-AF_MAX" 212}; 213static const char *const af_family_slock_key_strings[AF_MAX+1] = { 214 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 215 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", 216 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , 217 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , 218 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , 219 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , 220 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , 221 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" , 222 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , 223 "slock-27" , "slock-28" , "slock-AF_CAN" , 224 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 225 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 226 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , 227 "slock-AF_NFC" , "slock-AF_MAX" 228}; 229static const char *const af_family_clock_key_strings[AF_MAX+1] = { 230 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 231 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 232 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 233 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , 234 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , 235 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , 236 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , 237 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , 238 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 239 "clock-27" , "clock-28" , "clock-AF_CAN" , 240 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 241 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 242 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , 243 "clock-AF_NFC" , "clock-AF_MAX" 244}; 245 246/* 247 * sk_callback_lock locking rules are per-address-family, 248 * so split the lock classes by using a per-AF key: 249 */ 250static struct lock_class_key af_callback_keys[AF_MAX]; 251 252/* Take into consideration the size of the struct sk_buff overhead in the 253 * determination of these values, since that is non-constant across 254 * platforms. This makes socket queueing behavior and performance 255 * not depend upon such differences. 256 */ 257#define _SK_MEM_PACKETS 256 258#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256) 259#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 260#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 261 262/* Run time adjustable parameters. */ 263__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; 264EXPORT_SYMBOL(sysctl_wmem_max); 265__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; 266EXPORT_SYMBOL(sysctl_rmem_max); 267__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 268__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 269 270/* Maximal space eaten by iovec or ancillary data plus some space */ 271int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 272EXPORT_SYMBOL(sysctl_optmem_max); 273 274struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE; 275EXPORT_SYMBOL_GPL(memalloc_socks); 276 277/** 278 * sk_set_memalloc - sets %SOCK_MEMALLOC 279 * @sk: socket to set it on 280 * 281 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves. 282 * It's the responsibility of the admin to adjust min_free_kbytes 283 * to meet the requirements 284 */ 285void sk_set_memalloc(struct sock *sk) 286{ 287 sock_set_flag(sk, SOCK_MEMALLOC); 288 sk->sk_allocation |= __GFP_MEMALLOC; 289 static_key_slow_inc(&memalloc_socks); 290} 291EXPORT_SYMBOL_GPL(sk_set_memalloc); 292 293void sk_clear_memalloc(struct sock *sk) 294{ 295 sock_reset_flag(sk, SOCK_MEMALLOC); 296 sk->sk_allocation &= ~__GFP_MEMALLOC; 297 static_key_slow_dec(&memalloc_socks); 298} 299EXPORT_SYMBOL_GPL(sk_clear_memalloc); 300 301int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 302{ 303 int ret; 304 unsigned long pflags = current->flags; 305 306 /* these should have been dropped before queueing */ 307 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); 308 309 current->flags |= PF_MEMALLOC; 310 ret = sk->sk_backlog_rcv(sk, skb); 311 tsk_restore_flags(current, pflags, PF_MEMALLOC); 312 313 return ret; 314} 315EXPORT_SYMBOL(__sk_backlog_rcv); 316 317#if defined(CONFIG_CGROUPS) 318#if !defined(CONFIG_NET_CLS_CGROUP) 319int net_cls_subsys_id = -1; 320EXPORT_SYMBOL_GPL(net_cls_subsys_id); 321#endif 322#if !defined(CONFIG_NETPRIO_CGROUP) 323int net_prio_subsys_id = -1; 324EXPORT_SYMBOL_GPL(net_prio_subsys_id); 325#endif 326#endif 327 328static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 329{ 330 struct timeval tv; 331 332 if (optlen < sizeof(tv)) 333 return -EINVAL; 334 if (copy_from_user(&tv, optval, sizeof(tv))) 335 return -EFAULT; 336 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) 337 return -EDOM; 338 339 if (tv.tv_sec < 0) { 340 static int warned __read_mostly; 341 342 *timeo_p = 0; 343 if (warned < 10 && net_ratelimit()) { 344 warned++; 345 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", 346 __func__, current->comm, task_pid_nr(current)); 347 } 348 return 0; 349 } 350 *timeo_p = MAX_SCHEDULE_TIMEOUT; 351 if (tv.tv_sec == 0 && tv.tv_usec == 0) 352 return 0; 353 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1)) 354 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ); 355 return 0; 356} 357 358static void sock_warn_obsolete_bsdism(const char *name) 359{ 360 static int warned; 361 static char warncomm[TASK_COMM_LEN]; 362 if (strcmp(warncomm, current->comm) && warned < 5) { 363 strcpy(warncomm, current->comm); 364 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", 365 warncomm, name); 366 warned++; 367 } 368} 369 370#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) 371 372static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 373{ 374 if (sk->sk_flags & flags) { 375 sk->sk_flags &= ~flags; 376 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP)) 377 net_disable_timestamp(); 378 } 379} 380 381 382int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 383{ 384 int err; 385 int skb_len; 386 unsigned long flags; 387 struct sk_buff_head *list = &sk->sk_receive_queue; 388 389 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { 390 atomic_inc(&sk->sk_drops); 391 trace_sock_rcvqueue_full(sk, skb); 392 return -ENOMEM; 393 } 394 395 err = sk_filter(sk, skb); 396 if (err) 397 return err; 398 399 if (!sk_rmem_schedule(sk, skb->truesize)) { 400 atomic_inc(&sk->sk_drops); 401 return -ENOBUFS; 402 } 403 404 skb->dev = NULL; 405 skb_set_owner_r(skb, sk); 406 407 /* Cache the SKB length before we tack it onto the receive 408 * queue. Once it is added it no longer belongs to us and 409 * may be freed by other threads of control pulling packets 410 * from the queue. 411 */ 412 skb_len = skb->len; 413 414 /* we escape from rcu protected region, make sure we dont leak 415 * a norefcounted dst 416 */ 417 skb_dst_force(skb); 418 419 spin_lock_irqsave(&list->lock, flags); 420 skb->dropcount = atomic_read(&sk->sk_drops); 421 __skb_queue_tail(list, skb); 422 spin_unlock_irqrestore(&list->lock, flags); 423 424 if (!sock_flag(sk, SOCK_DEAD)) 425 sk->sk_data_ready(sk, skb_len); 426 return 0; 427} 428EXPORT_SYMBOL(sock_queue_rcv_skb); 429 430int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 431{ 432 int rc = NET_RX_SUCCESS; 433 434 if (sk_filter(sk, skb)) 435 goto discard_and_relse; 436 437 skb->dev = NULL; 438 439 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { 440 atomic_inc(&sk->sk_drops); 441 goto discard_and_relse; 442 } 443 if (nested) 444 bh_lock_sock_nested(sk); 445 else 446 bh_lock_sock(sk); 447 if (!sock_owned_by_user(sk)) { 448 /* 449 * trylock + unlock semantics: 450 */ 451 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 452 453 rc = sk_backlog_rcv(sk, skb); 454 455 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 456 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { 457 bh_unlock_sock(sk); 458 atomic_inc(&sk->sk_drops); 459 goto discard_and_relse; 460 } 461 462 bh_unlock_sock(sk); 463out: 464 sock_put(sk); 465 return rc; 466discard_and_relse: 467 kfree_skb(skb); 468 goto out; 469} 470EXPORT_SYMBOL(sk_receive_skb); 471 472void sk_reset_txq(struct sock *sk) 473{ 474 sk_tx_queue_clear(sk); 475} 476EXPORT_SYMBOL(sk_reset_txq); 477 478struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 479{ 480 struct dst_entry *dst = __sk_dst_get(sk); 481 482 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 483 sk_tx_queue_clear(sk); 484 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); 485 dst_release(dst); 486 return NULL; 487 } 488 489 return dst; 490} 491EXPORT_SYMBOL(__sk_dst_check); 492 493struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 494{ 495 struct dst_entry *dst = sk_dst_get(sk); 496 497 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 498 sk_dst_reset(sk); 499 dst_release(dst); 500 return NULL; 501 } 502 503 return dst; 504} 505EXPORT_SYMBOL(sk_dst_check); 506 507static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) 508{ 509 int ret = -ENOPROTOOPT; 510#ifdef CONFIG_NETDEVICES 511 struct net *net = sock_net(sk); 512 char devname[IFNAMSIZ]; 513 int index; 514 515 /* Sorry... */ 516 ret = -EPERM; 517 if (!capable(CAP_NET_RAW)) 518 goto out; 519 520 ret = -EINVAL; 521 if (optlen < 0) 522 goto out; 523 524 /* Bind this socket to a particular device like "eth0", 525 * as specified in the passed interface name. If the 526 * name is "" or the option length is zero the socket 527 * is not bound. 528 */ 529 if (optlen > IFNAMSIZ - 1) 530 optlen = IFNAMSIZ - 1; 531 memset(devname, 0, sizeof(devname)); 532 533 ret = -EFAULT; 534 if (copy_from_user(devname, optval, optlen)) 535 goto out; 536 537 index = 0; 538 if (devname[0] != '\0') { 539 struct net_device *dev; 540 541 rcu_read_lock(); 542 dev = dev_get_by_name_rcu(net, devname); 543 if (dev) 544 index = dev->ifindex; 545 rcu_read_unlock(); 546 ret = -ENODEV; 547 if (!dev) 548 goto out; 549 } 550 551 lock_sock(sk); 552 sk->sk_bound_dev_if = index; 553 sk_dst_reset(sk); 554 release_sock(sk); 555 556 ret = 0; 557 558out: 559#endif 560 561 return ret; 562} 563 564static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) 565{ 566 if (valbool) 567 sock_set_flag(sk, bit); 568 else 569 sock_reset_flag(sk, bit); 570} 571 572/* 573 * This is meant for all protocols to use and covers goings on 574 * at the socket level. Everything here is generic. 575 */ 576 577int sock_setsockopt(struct socket *sock, int level, int optname, 578 char __user *optval, unsigned int optlen) 579{ 580 struct sock *sk = sock->sk; 581 int val; 582 int valbool; 583 struct linger ling; 584 int ret = 0; 585 586 /* 587 * Options without arguments 588 */ 589 590 if (optname == SO_BINDTODEVICE) 591 return sock_bindtodevice(sk, optval, optlen); 592 593 if (optlen < sizeof(int)) 594 return -EINVAL; 595 596 if (get_user(val, (int __user *)optval)) 597 return -EFAULT; 598 599 valbool = val ? 1 : 0; 600 601 lock_sock(sk); 602 603 switch (optname) { 604 case SO_DEBUG: 605 if (val && !capable(CAP_NET_ADMIN)) 606 ret = -EACCES; 607 else 608 sock_valbool_flag(sk, SOCK_DBG, valbool); 609 break; 610 case SO_REUSEADDR: 611 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 612 break; 613 case SO_TYPE: 614 case SO_PROTOCOL: 615 case SO_DOMAIN: 616 case SO_ERROR: 617 ret = -ENOPROTOOPT; 618 break; 619 case SO_DONTROUTE: 620 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 621 break; 622 case SO_BROADCAST: 623 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 624 break; 625 case SO_SNDBUF: 626 /* Don't error on this BSD doesn't and if you think 627 * about it this is right. Otherwise apps have to 628 * play 'guess the biggest size' games. RCVBUF/SNDBUF 629 * are treated in BSD as hints 630 */ 631 val = min_t(u32, val, sysctl_wmem_max); 632set_sndbuf: 633 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 634 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); 635 /* Wake up sending tasks if we upped the value. */ 636 sk->sk_write_space(sk); 637 break; 638 639 case SO_SNDBUFFORCE: 640 if (!capable(CAP_NET_ADMIN)) { 641 ret = -EPERM; 642 break; 643 } 644 goto set_sndbuf; 645 646 case SO_RCVBUF: 647 /* Don't error on this BSD doesn't and if you think 648 * about it this is right. Otherwise apps have to 649 * play 'guess the biggest size' games. RCVBUF/SNDBUF 650 * are treated in BSD as hints 651 */ 652 val = min_t(u32, val, sysctl_rmem_max); 653set_rcvbuf: 654 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 655 /* 656 * We double it on the way in to account for 657 * "struct sk_buff" etc. overhead. Applications 658 * assume that the SO_RCVBUF setting they make will 659 * allow that much actual data to be received on that 660 * socket. 661 * 662 * Applications are unaware that "struct sk_buff" and 663 * other overheads allocate from the receive buffer 664 * during socket buffer allocation. 665 * 666 * And after considering the possible alternatives, 667 * returning the value we actually used in getsockopt 668 * is the most desirable behavior. 669 */ 670 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); 671 break; 672 673 case SO_RCVBUFFORCE: 674 if (!capable(CAP_NET_ADMIN)) { 675 ret = -EPERM; 676 break; 677 } 678 goto set_rcvbuf; 679 680 case SO_KEEPALIVE: 681#ifdef CONFIG_INET 682 if (sk->sk_protocol == IPPROTO_TCP) 683 tcp_set_keepalive(sk, valbool); 684#endif 685 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 686 break; 687 688 case SO_OOBINLINE: 689 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 690 break; 691 692 case SO_NO_CHECK: 693 sk->sk_no_check = valbool; 694 break; 695 696 case SO_PRIORITY: 697 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 698 sk->sk_priority = val; 699 else 700 ret = -EPERM; 701 break; 702 703 case SO_LINGER: 704 if (optlen < sizeof(ling)) { 705 ret = -EINVAL; /* 1003.1g */ 706 break; 707 } 708 if (copy_from_user(&ling, optval, sizeof(ling))) { 709 ret = -EFAULT; 710 break; 711 } 712 if (!ling.l_onoff) 713 sock_reset_flag(sk, SOCK_LINGER); 714 else { 715#if (BITS_PER_LONG == 32) 716 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 717 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 718 else 719#endif 720 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; 721 sock_set_flag(sk, SOCK_LINGER); 722 } 723 break; 724 725 case SO_BSDCOMPAT: 726 sock_warn_obsolete_bsdism("setsockopt"); 727 break; 728 729 case SO_PASSCRED: 730 if (valbool) 731 set_bit(SOCK_PASSCRED, &sock->flags); 732 else 733 clear_bit(SOCK_PASSCRED, &sock->flags); 734 break; 735 736 case SO_TIMESTAMP: 737 case SO_TIMESTAMPNS: 738 if (valbool) { 739 if (optname == SO_TIMESTAMP) 740 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 741 else 742 sock_set_flag(sk, SOCK_RCVTSTAMPNS); 743 sock_set_flag(sk, SOCK_RCVTSTAMP); 744 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 745 } else { 746 sock_reset_flag(sk, SOCK_RCVTSTAMP); 747 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 748 } 749 break; 750 751 case SO_TIMESTAMPING: 752 if (val & ~SOF_TIMESTAMPING_MASK) { 753 ret = -EINVAL; 754 break; 755 } 756 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE, 757 val & SOF_TIMESTAMPING_TX_HARDWARE); 758 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE, 759 val & SOF_TIMESTAMPING_TX_SOFTWARE); 760 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE, 761 val & SOF_TIMESTAMPING_RX_HARDWARE); 762 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 763 sock_enable_timestamp(sk, 764 SOCK_TIMESTAMPING_RX_SOFTWARE); 765 else 766 sock_disable_timestamp(sk, 767 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); 768 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, 769 val & SOF_TIMESTAMPING_SOFTWARE); 770 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, 771 val & SOF_TIMESTAMPING_SYS_HARDWARE); 772 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE, 773 val & SOF_TIMESTAMPING_RAW_HARDWARE); 774 break; 775 776 case SO_RCVLOWAT: 777 if (val < 0) 778 val = INT_MAX; 779 sk->sk_rcvlowat = val ? : 1; 780 break; 781 782 case SO_RCVTIMEO: 783 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); 784 break; 785 786 case SO_SNDTIMEO: 787 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); 788 break; 789 790 case SO_ATTACH_FILTER: 791 ret = -EINVAL; 792 if (optlen == sizeof(struct sock_fprog)) { 793 struct sock_fprog fprog; 794 795 ret = -EFAULT; 796 if (copy_from_user(&fprog, optval, sizeof(fprog))) 797 break; 798 799 ret = sk_attach_filter(&fprog, sk); 800 } 801 break; 802 803 case SO_DETACH_FILTER: 804 ret = sk_detach_filter(sk); 805 break; 806 807 case SO_PASSSEC: 808 if (valbool) 809 set_bit(SOCK_PASSSEC, &sock->flags); 810 else 811 clear_bit(SOCK_PASSSEC, &sock->flags); 812 break; 813 case SO_MARK: 814 if (!capable(CAP_NET_ADMIN)) 815 ret = -EPERM; 816 else 817 sk->sk_mark = val; 818 break; 819 820 /* We implement the SO_SNDLOWAT etc to 821 not be settable (1003.1g 5.3) */ 822 case SO_RXQ_OVFL: 823 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); 824 break; 825 826 case SO_WIFI_STATUS: 827 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 828 break; 829 830 case SO_PEEK_OFF: 831 if (sock->ops->set_peek_off) 832 sock->ops->set_peek_off(sk, val); 833 else 834 ret = -EOPNOTSUPP; 835 break; 836 837 case SO_NOFCS: 838 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 839 break; 840 841 default: 842 ret = -ENOPROTOOPT; 843 break; 844 } 845 release_sock(sk); 846 return ret; 847} 848EXPORT_SYMBOL(sock_setsockopt); 849 850 851void cred_to_ucred(struct pid *pid, const struct cred *cred, 852 struct ucred *ucred) 853{ 854 ucred->pid = pid_vnr(pid); 855 ucred->uid = ucred->gid = -1; 856 if (cred) { 857 struct user_namespace *current_ns = current_user_ns(); 858 859 ucred->uid = from_kuid(current_ns, cred->euid); 860 ucred->gid = from_kgid(current_ns, cred->egid); 861 } 862} 863EXPORT_SYMBOL_GPL(cred_to_ucred); 864 865int sock_getsockopt(struct socket *sock, int level, int optname, 866 char __user *optval, int __user *optlen) 867{ 868 struct sock *sk = sock->sk; 869 870 union { 871 int val; 872 struct linger ling; 873 struct timeval tm; 874 } v; 875 876 int lv = sizeof(int); 877 int len; 878 879 if (get_user(len, optlen)) 880 return -EFAULT; 881 if (len < 0) 882 return -EINVAL; 883 884 memset(&v, 0, sizeof(v)); 885 886 switch (optname) { 887 case SO_DEBUG: 888 v.val = sock_flag(sk, SOCK_DBG); 889 break; 890 891 case SO_DONTROUTE: 892 v.val = sock_flag(sk, SOCK_LOCALROUTE); 893 break; 894 895 case SO_BROADCAST: 896 v.val = sock_flag(sk, SOCK_BROADCAST); 897 break; 898 899 case SO_SNDBUF: 900 v.val = sk->sk_sndbuf; 901 break; 902 903 case SO_RCVBUF: 904 v.val = sk->sk_rcvbuf; 905 break; 906 907 case SO_REUSEADDR: 908 v.val = sk->sk_reuse; 909 break; 910 911 case SO_KEEPALIVE: 912 v.val = sock_flag(sk, SOCK_KEEPOPEN); 913 break; 914 915 case SO_TYPE: 916 v.val = sk->sk_type; 917 break; 918 919 case SO_PROTOCOL: 920 v.val = sk->sk_protocol; 921 break; 922 923 case SO_DOMAIN: 924 v.val = sk->sk_family; 925 break; 926 927 case SO_ERROR: 928 v.val = -sock_error(sk); 929 if (v.val == 0) 930 v.val = xchg(&sk->sk_err_soft, 0); 931 break; 932 933 case SO_OOBINLINE: 934 v.val = sock_flag(sk, SOCK_URGINLINE); 935 break; 936 937 case SO_NO_CHECK: 938 v.val = sk->sk_no_check; 939 break; 940 941 case SO_PRIORITY: 942 v.val = sk->sk_priority; 943 break; 944 945 case SO_LINGER: 946 lv = sizeof(v.ling); 947 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); 948 v.ling.l_linger = sk->sk_lingertime / HZ; 949 break; 950 951 case SO_BSDCOMPAT: 952 sock_warn_obsolete_bsdism("getsockopt"); 953 break; 954 955 case SO_TIMESTAMP: 956 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && 957 !sock_flag(sk, SOCK_RCVTSTAMPNS); 958 break; 959 960 case SO_TIMESTAMPNS: 961 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); 962 break; 963 964 case SO_TIMESTAMPING: 965 v.val = 0; 966 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) 967 v.val |= SOF_TIMESTAMPING_TX_HARDWARE; 968 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) 969 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE; 970 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE)) 971 v.val |= SOF_TIMESTAMPING_RX_HARDWARE; 972 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) 973 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE; 974 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) 975 v.val |= SOF_TIMESTAMPING_SOFTWARE; 976 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)) 977 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE; 978 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) 979 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE; 980 break; 981 982 case SO_RCVTIMEO: 983 lv = sizeof(struct timeval); 984 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { 985 v.tm.tv_sec = 0; 986 v.tm.tv_usec = 0; 987 } else { 988 v.tm.tv_sec = sk->sk_rcvtimeo / HZ; 989 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; 990 } 991 break; 992 993 case SO_SNDTIMEO: 994 lv = sizeof(struct timeval); 995 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { 996 v.tm.tv_sec = 0; 997 v.tm.tv_usec = 0; 998 } else { 999 v.tm.tv_sec = sk->sk_sndtimeo / HZ; 1000 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; 1001 } 1002 break; 1003 1004 case SO_RCVLOWAT: 1005 v.val = sk->sk_rcvlowat; 1006 break; 1007 1008 case SO_SNDLOWAT: 1009 v.val = 1; 1010 break; 1011 1012 case SO_PASSCRED: 1013 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); 1014 break; 1015 1016 case SO_PEERCRED: 1017 { 1018 struct ucred peercred; 1019 if (len > sizeof(peercred)) 1020 len = sizeof(peercred); 1021 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1022 if (copy_to_user(optval, &peercred, len)) 1023 return -EFAULT; 1024 goto lenout; 1025 } 1026 1027 case SO_PEERNAME: 1028 { 1029 char address[128]; 1030 1031 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) 1032 return -ENOTCONN; 1033 if (lv < len) 1034 return -EINVAL; 1035 if (copy_to_user(optval, address, len)) 1036 return -EFAULT; 1037 goto lenout; 1038 } 1039 1040 /* Dubious BSD thing... Probably nobody even uses it, but 1041 * the UNIX standard wants it for whatever reason... -DaveM 1042 */ 1043 case SO_ACCEPTCONN: 1044 v.val = sk->sk_state == TCP_LISTEN; 1045 break; 1046 1047 case SO_PASSSEC: 1048 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); 1049 break; 1050 1051 case SO_PEERSEC: 1052 return security_socket_getpeersec_stream(sock, optval, optlen, len); 1053 1054 case SO_MARK: 1055 v.val = sk->sk_mark; 1056 break; 1057 1058 case SO_RXQ_OVFL: 1059 v.val = sock_flag(sk, SOCK_RXQ_OVFL); 1060 break; 1061 1062 case SO_WIFI_STATUS: 1063 v.val = sock_flag(sk, SOCK_WIFI_STATUS); 1064 break; 1065 1066 case SO_PEEK_OFF: 1067 if (!sock->ops->set_peek_off) 1068 return -EOPNOTSUPP; 1069 1070 v.val = sk->sk_peek_off; 1071 break; 1072 case SO_NOFCS: 1073 v.val = sock_flag(sk, SOCK_NOFCS); 1074 break; 1075 default: 1076 return -ENOPROTOOPT; 1077 } 1078 1079 if (len > lv) 1080 len = lv; 1081 if (copy_to_user(optval, &v, len)) 1082 return -EFAULT; 1083lenout: 1084 if (put_user(len, optlen)) 1085 return -EFAULT; 1086 return 0; 1087} 1088 1089/* 1090 * Initialize an sk_lock. 1091 * 1092 * (We also register the sk_lock with the lock validator.) 1093 */ 1094static inline void sock_lock_init(struct sock *sk) 1095{ 1096 sock_lock_init_class_and_name(sk, 1097 af_family_slock_key_strings[sk->sk_family], 1098 af_family_slock_keys + sk->sk_family, 1099 af_family_key_strings[sk->sk_family], 1100 af_family_keys + sk->sk_family); 1101} 1102 1103/* 1104 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 1105 * even temporarly, because of RCU lookups. sk_node should also be left as is. 1106 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 1107 */ 1108static void sock_copy(struct sock *nsk, const struct sock *osk) 1109{ 1110#ifdef CONFIG_SECURITY_NETWORK 1111 void *sptr = nsk->sk_security; 1112#endif 1113 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 1114 1115 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 1116 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); 1117 1118#ifdef CONFIG_SECURITY_NETWORK 1119 nsk->sk_security = sptr; 1120 security_sk_clone(osk, nsk); 1121#endif 1122} 1123 1124/* 1125 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes 1126 * un-modified. Special care is taken when initializing object to zero. 1127 */ 1128static inline void sk_prot_clear_nulls(struct sock *sk, int size) 1129{ 1130 if (offsetof(struct sock, sk_node.next) != 0) 1131 memset(sk, 0, offsetof(struct sock, sk_node.next)); 1132 memset(&sk->sk_node.pprev, 0, 1133 size - offsetof(struct sock, sk_node.pprev)); 1134} 1135 1136void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) 1137{ 1138 unsigned long nulls1, nulls2; 1139 1140 nulls1 = offsetof(struct sock, __sk_common.skc_node.next); 1141 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next); 1142 if (nulls1 > nulls2) 1143 swap(nulls1, nulls2); 1144 1145 if (nulls1 != 0) 1146 memset((char *)sk, 0, nulls1); 1147 memset((char *)sk + nulls1 + sizeof(void *), 0, 1148 nulls2 - nulls1 - sizeof(void *)); 1149 memset((char *)sk + nulls2 + sizeof(void *), 0, 1150 size - nulls2 - sizeof(void *)); 1151} 1152EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls); 1153 1154static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 1155 int family) 1156{ 1157 struct sock *sk; 1158 struct kmem_cache *slab; 1159 1160 slab = prot->slab; 1161 if (slab != NULL) { 1162 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 1163 if (!sk) 1164 return sk; 1165 if (priority & __GFP_ZERO) { 1166 if (prot->clear_sk) 1167 prot->clear_sk(sk, prot->obj_size); 1168 else 1169 sk_prot_clear_nulls(sk, prot->obj_size); 1170 } 1171 } else 1172 sk = kmalloc(prot->obj_size, priority); 1173 1174 if (sk != NULL) { 1175 kmemcheck_annotate_bitfield(sk, flags); 1176 1177 if (security_sk_alloc(sk, family, priority)) 1178 goto out_free; 1179 1180 if (!try_module_get(prot->owner)) 1181 goto out_free_sec; 1182 sk_tx_queue_clear(sk); 1183 } 1184 1185 return sk; 1186 1187out_free_sec: 1188 security_sk_free(sk); 1189out_free: 1190 if (slab != NULL) 1191 kmem_cache_free(slab, sk); 1192 else 1193 kfree(sk); 1194 return NULL; 1195} 1196 1197static void sk_prot_free(struct proto *prot, struct sock *sk) 1198{ 1199 struct kmem_cache *slab; 1200 struct module *owner; 1201 1202 owner = prot->owner; 1203 slab = prot->slab; 1204 1205 security_sk_free(sk); 1206 if (slab != NULL) 1207 kmem_cache_free(slab, sk); 1208 else 1209 kfree(sk); 1210 module_put(owner); 1211} 1212 1213#ifdef CONFIG_CGROUPS 1214void sock_update_classid(struct sock *sk) 1215{ 1216 u32 classid; 1217 1218 rcu_read_lock(); /* doing current task, which cannot vanish. */ 1219 classid = task_cls_classid(current); 1220 rcu_read_unlock(); 1221 if (classid && classid != sk->sk_classid) 1222 sk->sk_classid = classid; 1223} 1224EXPORT_SYMBOL(sock_update_classid); 1225 1226void sock_update_netprioidx(struct sock *sk, struct task_struct *task) 1227{ 1228 if (in_interrupt()) 1229 return; 1230 1231 sk->sk_cgrp_prioidx = task_netprioidx(task); 1232} 1233EXPORT_SYMBOL_GPL(sock_update_netprioidx); 1234#endif 1235 1236/** 1237 * sk_alloc - All socket objects are allocated here 1238 * @net: the applicable net namespace 1239 * @family: protocol family 1240 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 1241 * @prot: struct proto associated with this new sock instance 1242 */ 1243struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 1244 struct proto *prot) 1245{ 1246 struct sock *sk; 1247 1248 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 1249 if (sk) { 1250 sk->sk_family = family; 1251 /* 1252 * See comment in struct sock definition to understand 1253 * why we need sk_prot_creator -acme 1254 */ 1255 sk->sk_prot = sk->sk_prot_creator = prot; 1256 sock_lock_init(sk); 1257 sock_net_set(sk, get_net(net)); 1258 atomic_set(&sk->sk_wmem_alloc, 1); 1259 1260 sock_update_classid(sk); 1261 sock_update_netprioidx(sk, current); 1262 } 1263 1264 return sk; 1265} 1266EXPORT_SYMBOL(sk_alloc); 1267 1268static void __sk_free(struct sock *sk) 1269{ 1270 struct sk_filter *filter; 1271 1272 if (sk->sk_destruct) 1273 sk->sk_destruct(sk); 1274 1275 filter = rcu_dereference_check(sk->sk_filter, 1276 atomic_read(&sk->sk_wmem_alloc) == 0); 1277 if (filter) { 1278 sk_filter_uncharge(sk, filter); 1279 RCU_INIT_POINTER(sk->sk_filter, NULL); 1280 } 1281 1282 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 1283 1284 if (atomic_read(&sk->sk_omem_alloc)) 1285 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1286 __func__, atomic_read(&sk->sk_omem_alloc)); 1287 1288 if (sk->sk_peer_cred) 1289 put_cred(sk->sk_peer_cred); 1290 put_pid(sk->sk_peer_pid); 1291 put_net(sock_net(sk)); 1292 sk_prot_free(sk->sk_prot_creator, sk); 1293} 1294 1295void sk_free(struct sock *sk) 1296{ 1297 /* 1298 * We subtract one from sk_wmem_alloc and can know if 1299 * some packets are still in some tx queue. 1300 * If not null, sock_wfree() will call __sk_free(sk) later 1301 */ 1302 if (atomic_dec_and_test(&sk->sk_wmem_alloc)) 1303 __sk_free(sk); 1304} 1305EXPORT_SYMBOL(sk_free); 1306 1307/* 1308 * Last sock_put should drop reference to sk->sk_net. It has already 1309 * been dropped in sk_change_net. Taking reference to stopping namespace 1310 * is not an option. 1311 * Take reference to a socket to remove it from hash _alive_ and after that 1312 * destroy it in the context of init_net. 1313 */ 1314void sk_release_kernel(struct sock *sk) 1315{ 1316 if (sk == NULL || sk->sk_socket == NULL) 1317 return; 1318 1319 sock_hold(sk); 1320 sock_release(sk->sk_socket); 1321 release_net(sock_net(sk)); 1322 sock_net_set(sk, get_net(&init_net)); 1323 sock_put(sk); 1324} 1325EXPORT_SYMBOL(sk_release_kernel); 1326 1327static void sk_update_clone(const struct sock *sk, struct sock *newsk) 1328{ 1329 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1330 sock_update_memcg(newsk); 1331} 1332 1333/** 1334 * sk_clone_lock - clone a socket, and lock its clone 1335 * @sk: the socket to clone 1336 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 1337 * 1338 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 1339 */ 1340struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 1341{ 1342 struct sock *newsk; 1343 1344 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); 1345 if (newsk != NULL) { 1346 struct sk_filter *filter; 1347 1348 sock_copy(newsk, sk); 1349 1350 /* SANITY */ 1351 get_net(sock_net(newsk)); 1352 sk_node_init(&newsk->sk_node); 1353 sock_lock_init(newsk); 1354 bh_lock_sock(newsk); 1355 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 1356 newsk->sk_backlog.len = 0; 1357 1358 atomic_set(&newsk->sk_rmem_alloc, 0); 1359 /* 1360 * sk_wmem_alloc set to one (see sk_free() and sock_wfree()) 1361 */ 1362 atomic_set(&newsk->sk_wmem_alloc, 1); 1363 atomic_set(&newsk->sk_omem_alloc, 0); 1364 skb_queue_head_init(&newsk->sk_receive_queue); 1365 skb_queue_head_init(&newsk->sk_write_queue); 1366#ifdef CONFIG_NET_DMA 1367 skb_queue_head_init(&newsk->sk_async_wait_queue); 1368#endif 1369 1370 spin_lock_init(&newsk->sk_dst_lock); 1371 rwlock_init(&newsk->sk_callback_lock); 1372 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1373 af_callback_keys + newsk->sk_family, 1374 af_family_clock_key_strings[newsk->sk_family]); 1375 1376 newsk->sk_dst_cache = NULL; 1377 newsk->sk_wmem_queued = 0; 1378 newsk->sk_forward_alloc = 0; 1379 newsk->sk_send_head = NULL; 1380 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1381 1382 sock_reset_flag(newsk, SOCK_DONE); 1383 skb_queue_head_init(&newsk->sk_error_queue); 1384 1385 filter = rcu_dereference_protected(newsk->sk_filter, 1); 1386 if (filter != NULL) 1387 sk_filter_charge(newsk, filter); 1388 1389 if (unlikely(xfrm_sk_clone_policy(newsk))) { 1390 /* It is still raw copy of parent, so invalidate 1391 * destructor and make plain sk_free() */ 1392 newsk->sk_destruct = NULL; 1393 bh_unlock_sock(newsk); 1394 sk_free(newsk); 1395 newsk = NULL; 1396 goto out; 1397 } 1398 1399 newsk->sk_err = 0; 1400 newsk->sk_priority = 0; 1401 /* 1402 * Before updating sk_refcnt, we must commit prior changes to memory 1403 * (Documentation/RCU/rculist_nulls.txt for details) 1404 */ 1405 smp_wmb(); 1406 atomic_set(&newsk->sk_refcnt, 2); 1407 1408 /* 1409 * Increment the counter in the same struct proto as the master 1410 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that 1411 * is the same as sk->sk_prot->socks, as this field was copied 1412 * with memcpy). 1413 * 1414 * This _changes_ the previous behaviour, where 1415 * tcp_create_openreq_child always was incrementing the 1416 * equivalent to tcp_prot->socks (inet_sock_nr), so this have 1417 * to be taken into account in all callers. -acme 1418 */ 1419 sk_refcnt_debug_inc(newsk); 1420 sk_set_socket(newsk, NULL); 1421 newsk->sk_wq = NULL; 1422 1423 sk_update_clone(sk, newsk); 1424 1425 if (newsk->sk_prot->sockets_allocated) 1426 sk_sockets_allocated_inc(newsk); 1427 1428 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) 1429 net_enable_timestamp(); 1430 } 1431out: 1432 return newsk; 1433} 1434EXPORT_SYMBOL_GPL(sk_clone_lock); 1435 1436void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 1437{ 1438 __sk_dst_set(sk, dst); 1439 sk->sk_route_caps = dst->dev->features; 1440 if (sk->sk_route_caps & NETIF_F_GSO) 1441 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 1442 sk->sk_route_caps &= ~sk->sk_route_nocaps; 1443 if (sk_can_gso(sk)) { 1444 if (dst->header_len) { 1445 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 1446 } else { 1447 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 1448 sk->sk_gso_max_size = dst->dev->gso_max_size; 1449 } 1450 } 1451} 1452EXPORT_SYMBOL_GPL(sk_setup_caps); 1453 1454void __init sk_init(void) 1455{ 1456 if (totalram_pages <= 4096) { 1457 sysctl_wmem_max = 32767; 1458 sysctl_rmem_max = 32767; 1459 sysctl_wmem_default = 32767; 1460 sysctl_rmem_default = 32767; 1461 } else if (totalram_pages >= 131072) { 1462 sysctl_wmem_max = 131071; 1463 sysctl_rmem_max = 131071; 1464 } 1465} 1466 1467/* 1468 * Simple resource managers for sockets. 1469 */ 1470 1471 1472/* 1473 * Write buffer destructor automatically called from kfree_skb. 1474 */ 1475void sock_wfree(struct sk_buff *skb) 1476{ 1477 struct sock *sk = skb->sk; 1478 unsigned int len = skb->truesize; 1479 1480 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 1481 /* 1482 * Keep a reference on sk_wmem_alloc, this will be released 1483 * after sk_write_space() call 1484 */ 1485 atomic_sub(len - 1, &sk->sk_wmem_alloc); 1486 sk->sk_write_space(sk); 1487 len = 1; 1488 } 1489 /* 1490 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 1491 * could not do because of in-flight packets 1492 */ 1493 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) 1494 __sk_free(sk); 1495} 1496EXPORT_SYMBOL(sock_wfree); 1497 1498/* 1499 * Read buffer destructor automatically called from kfree_skb. 1500 */ 1501void sock_rfree(struct sk_buff *skb) 1502{ 1503 struct sock *sk = skb->sk; 1504 unsigned int len = skb->truesize; 1505 1506 atomic_sub(len, &sk->sk_rmem_alloc); 1507 sk_mem_uncharge(sk, len); 1508} 1509EXPORT_SYMBOL(sock_rfree); 1510 1511void sock_edemux(struct sk_buff *skb) 1512{ 1513 sock_put(skb->sk); 1514} 1515EXPORT_SYMBOL(sock_edemux); 1516 1517int sock_i_uid(struct sock *sk) 1518{ 1519 int uid; 1520 1521 read_lock_bh(&sk->sk_callback_lock); 1522 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1523 read_unlock_bh(&sk->sk_callback_lock); 1524 return uid; 1525} 1526EXPORT_SYMBOL(sock_i_uid); 1527 1528unsigned long sock_i_ino(struct sock *sk) 1529{ 1530 unsigned long ino; 1531 1532 read_lock_bh(&sk->sk_callback_lock); 1533 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1534 read_unlock_bh(&sk->sk_callback_lock); 1535 return ino; 1536} 1537EXPORT_SYMBOL(sock_i_ino); 1538 1539/* 1540 * Allocate a skb from the socket's send buffer. 1541 */ 1542struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1543 gfp_t priority) 1544{ 1545 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1546 struct sk_buff *skb = alloc_skb(size, priority); 1547 if (skb) { 1548 skb_set_owner_w(skb, sk); 1549 return skb; 1550 } 1551 } 1552 return NULL; 1553} 1554EXPORT_SYMBOL(sock_wmalloc); 1555 1556/* 1557 * Allocate a skb from the socket's receive buffer. 1558 */ 1559struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 1560 gfp_t priority) 1561{ 1562 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 1563 struct sk_buff *skb = alloc_skb(size, priority); 1564 if (skb) { 1565 skb_set_owner_r(skb, sk); 1566 return skb; 1567 } 1568 } 1569 return NULL; 1570} 1571 1572/* 1573 * Allocate a memory block from the socket's option memory buffer. 1574 */ 1575void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 1576{ 1577 if ((unsigned int)size <= sysctl_optmem_max && 1578 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { 1579 void *mem; 1580 /* First do the add, to avoid the race if kmalloc 1581 * might sleep. 1582 */ 1583 atomic_add(size, &sk->sk_omem_alloc); 1584 mem = kmalloc(size, priority); 1585 if (mem) 1586 return mem; 1587 atomic_sub(size, &sk->sk_omem_alloc); 1588 } 1589 return NULL; 1590} 1591EXPORT_SYMBOL(sock_kmalloc); 1592 1593/* 1594 * Free an option memory block. 1595 */ 1596void sock_kfree_s(struct sock *sk, void *mem, int size) 1597{ 1598 kfree(mem); 1599 atomic_sub(size, &sk->sk_omem_alloc); 1600} 1601EXPORT_SYMBOL(sock_kfree_s); 1602 1603/* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 1604 I think, these locks should be removed for datagram sockets. 1605 */ 1606static long sock_wait_for_wmem(struct sock *sk, long timeo) 1607{ 1608 DEFINE_WAIT(wait); 1609 1610 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1611 for (;;) { 1612 if (!timeo) 1613 break; 1614 if (signal_pending(current)) 1615 break; 1616 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1617 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1618 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) 1619 break; 1620 if (sk->sk_shutdown & SEND_SHUTDOWN) 1621 break; 1622 if (sk->sk_err) 1623 break; 1624 timeo = schedule_timeout(timeo); 1625 } 1626 finish_wait(sk_sleep(sk), &wait); 1627 return timeo; 1628} 1629 1630 1631/* 1632 * Generic send/receive buffer handlers 1633 */ 1634 1635struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 1636 unsigned long data_len, int noblock, 1637 int *errcode) 1638{ 1639 struct sk_buff *skb; 1640 gfp_t gfp_mask; 1641 long timeo; 1642 int err; 1643 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 1644 1645 err = -EMSGSIZE; 1646 if (npages > MAX_SKB_FRAGS) 1647 goto failure; 1648 1649 gfp_mask = sk->sk_allocation; 1650 if (gfp_mask & __GFP_WAIT) 1651 gfp_mask |= __GFP_REPEAT; 1652 1653 timeo = sock_sndtimeo(sk, noblock); 1654 while (1) { 1655 err = sock_error(sk); 1656 if (err != 0) 1657 goto failure; 1658 1659 err = -EPIPE; 1660 if (sk->sk_shutdown & SEND_SHUTDOWN) 1661 goto failure; 1662 1663 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1664 skb = alloc_skb(header_len, gfp_mask); 1665 if (skb) { 1666 int i; 1667 1668 /* No pages, we're done... */ 1669 if (!data_len) 1670 break; 1671 1672 skb->truesize += data_len; 1673 skb_shinfo(skb)->nr_frags = npages; 1674 for (i = 0; i < npages; i++) { 1675 struct page *page; 1676 1677 page = alloc_pages(sk->sk_allocation, 0); 1678 if (!page) { 1679 err = -ENOBUFS; 1680 skb_shinfo(skb)->nr_frags = i; 1681 kfree_skb(skb); 1682 goto failure; 1683 } 1684 1685 __skb_fill_page_desc(skb, i, 1686 page, 0, 1687 (data_len >= PAGE_SIZE ? 1688 PAGE_SIZE : 1689 data_len)); 1690 data_len -= PAGE_SIZE; 1691 } 1692 1693 /* Full success... */ 1694 break; 1695 } 1696 err = -ENOBUFS; 1697 goto failure; 1698 } 1699 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1700 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1701 err = -EAGAIN; 1702 if (!timeo) 1703 goto failure; 1704 if (signal_pending(current)) 1705 goto interrupted; 1706 timeo = sock_wait_for_wmem(sk, timeo); 1707 } 1708 1709 skb_set_owner_w(skb, sk); 1710 return skb; 1711 1712interrupted: 1713 err = sock_intr_errno(timeo); 1714failure: 1715 *errcode = err; 1716 return NULL; 1717} 1718EXPORT_SYMBOL(sock_alloc_send_pskb); 1719 1720struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1721 int noblock, int *errcode) 1722{ 1723 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); 1724} 1725EXPORT_SYMBOL(sock_alloc_send_skb); 1726 1727static void __lock_sock(struct sock *sk) 1728 __releases(&sk->sk_lock.slock) 1729 __acquires(&sk->sk_lock.slock) 1730{ 1731 DEFINE_WAIT(wait); 1732 1733 for (;;) { 1734 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 1735 TASK_UNINTERRUPTIBLE); 1736 spin_unlock_bh(&sk->sk_lock.slock); 1737 schedule(); 1738 spin_lock_bh(&sk->sk_lock.slock); 1739 if (!sock_owned_by_user(sk)) 1740 break; 1741 } 1742 finish_wait(&sk->sk_lock.wq, &wait); 1743} 1744 1745static void __release_sock(struct sock *sk) 1746 __releases(&sk->sk_lock.slock) 1747 __acquires(&sk->sk_lock.slock) 1748{ 1749 struct sk_buff *skb = sk->sk_backlog.head; 1750 1751 do { 1752 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 1753 bh_unlock_sock(sk); 1754 1755 do { 1756 struct sk_buff *next = skb->next; 1757 1758 prefetch(next); 1759 WARN_ON_ONCE(skb_dst_is_noref(skb)); 1760 skb->next = NULL; 1761 sk_backlog_rcv(sk, skb); 1762 1763 /* 1764 * We are in process context here with softirqs 1765 * disabled, use cond_resched_softirq() to preempt. 1766 * This is safe to do because we've taken the backlog 1767 * queue private: 1768 */ 1769 cond_resched_softirq(); 1770 1771 skb = next; 1772 } while (skb != NULL); 1773 1774 bh_lock_sock(sk); 1775 } while ((skb = sk->sk_backlog.head) != NULL); 1776 1777 /* 1778 * Doing the zeroing here guarantee we can not loop forever 1779 * while a wild producer attempts to flood us. 1780 */ 1781 sk->sk_backlog.len = 0; 1782} 1783 1784/** 1785 * sk_wait_data - wait for data to arrive at sk_receive_queue 1786 * @sk: sock to wait on 1787 * @timeo: for how long 1788 * 1789 * Now socket state including sk->sk_err is changed only under lock, 1790 * hence we may omit checks after joining wait queue. 1791 * We check receive queue before schedule() only as optimization; 1792 * it is very likely that release_sock() added new data. 1793 */ 1794int sk_wait_data(struct sock *sk, long *timeo) 1795{ 1796 int rc; 1797 DEFINE_WAIT(wait); 1798 1799 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1800 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1801 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1802 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1803 finish_wait(sk_sleep(sk), &wait); 1804 return rc; 1805} 1806EXPORT_SYMBOL(sk_wait_data); 1807 1808/** 1809 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 1810 * @sk: socket 1811 * @size: memory size to allocate 1812 * @kind: allocation type 1813 * 1814 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 1815 * rmem allocation. This function assumes that protocols which have 1816 * memory_pressure use sk_wmem_queued as write buffer accounting. 1817 */ 1818int __sk_mem_schedule(struct sock *sk, int size, int kind) 1819{ 1820 struct proto *prot = sk->sk_prot; 1821 int amt = sk_mem_pages(size); 1822 long allocated; 1823 int parent_status = UNDER_LIMIT; 1824 1825 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 1826 1827 allocated = sk_memory_allocated_add(sk, amt, &parent_status); 1828 1829 /* Under limit. */ 1830 if (parent_status == UNDER_LIMIT && 1831 allocated <= sk_prot_mem_limits(sk, 0)) { 1832 sk_leave_memory_pressure(sk); 1833 return 1; 1834 } 1835 1836 /* Under pressure. (we or our parents) */ 1837 if ((parent_status > SOFT_LIMIT) || 1838 allocated > sk_prot_mem_limits(sk, 1)) 1839 sk_enter_memory_pressure(sk); 1840 1841 /* Over hard limit (we or our parents) */ 1842 if ((parent_status == OVER_LIMIT) || 1843 (allocated > sk_prot_mem_limits(sk, 2))) 1844 goto suppress_allocation; 1845 1846 /* guarantee minimum buffer size under pressure */ 1847 if (kind == SK_MEM_RECV) { 1848 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) 1849 return 1; 1850 1851 } else { /* SK_MEM_SEND */ 1852 if (sk->sk_type == SOCK_STREAM) { 1853 if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) 1854 return 1; 1855 } else if (atomic_read(&sk->sk_wmem_alloc) < 1856 prot->sysctl_wmem[0]) 1857 return 1; 1858 } 1859 1860 if (sk_has_memory_pressure(sk)) { 1861 int alloc; 1862 1863 if (!sk_under_memory_pressure(sk)) 1864 return 1; 1865 alloc = sk_sockets_allocated_read_positive(sk); 1866 if (sk_prot_mem_limits(sk, 2) > alloc * 1867 sk_mem_pages(sk->sk_wmem_queued + 1868 atomic_read(&sk->sk_rmem_alloc) + 1869 sk->sk_forward_alloc)) 1870 return 1; 1871 } 1872 1873suppress_allocation: 1874 1875 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 1876 sk_stream_moderate_sndbuf(sk); 1877 1878 /* Fail only if socket is _under_ its sndbuf. 1879 * In this case we cannot block, so that we have to fail. 1880 */ 1881 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) 1882 return 1; 1883 } 1884 1885 trace_sock_exceed_buf_limit(sk, prot, allocated); 1886 1887 /* Alas. Undo changes. */ 1888 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1889 1890 sk_memory_allocated_sub(sk, amt); 1891 1892 return 0; 1893} 1894EXPORT_SYMBOL(__sk_mem_schedule); 1895 1896/** 1897 * __sk_reclaim - reclaim memory_allocated 1898 * @sk: socket 1899 */ 1900void __sk_mem_reclaim(struct sock *sk) 1901{ 1902 sk_memory_allocated_sub(sk, 1903 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); 1904 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1905 1906 if (sk_under_memory_pressure(sk) && 1907 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 1908 sk_leave_memory_pressure(sk); 1909} 1910EXPORT_SYMBOL(__sk_mem_reclaim); 1911 1912 1913/* 1914 * Set of default routines for initialising struct proto_ops when 1915 * the protocol does not support a particular function. In certain 1916 * cases where it makes no sense for a protocol to have a "do nothing" 1917 * function, some default processing is provided. 1918 */ 1919 1920int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 1921{ 1922 return -EOPNOTSUPP; 1923} 1924EXPORT_SYMBOL(sock_no_bind); 1925 1926int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 1927 int len, int flags) 1928{ 1929 return -EOPNOTSUPP; 1930} 1931EXPORT_SYMBOL(sock_no_connect); 1932 1933int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 1934{ 1935 return -EOPNOTSUPP; 1936} 1937EXPORT_SYMBOL(sock_no_socketpair); 1938 1939int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 1940{ 1941 return -EOPNOTSUPP; 1942} 1943EXPORT_SYMBOL(sock_no_accept); 1944 1945int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 1946 int *len, int peer) 1947{ 1948 return -EOPNOTSUPP; 1949} 1950EXPORT_SYMBOL(sock_no_getname); 1951 1952unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt) 1953{ 1954 return 0; 1955} 1956EXPORT_SYMBOL(sock_no_poll); 1957 1958int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1959{ 1960 return -EOPNOTSUPP; 1961} 1962EXPORT_SYMBOL(sock_no_ioctl); 1963 1964int sock_no_listen(struct socket *sock, int backlog) 1965{ 1966 return -EOPNOTSUPP; 1967} 1968EXPORT_SYMBOL(sock_no_listen); 1969 1970int sock_no_shutdown(struct socket *sock, int how) 1971{ 1972 return -EOPNOTSUPP; 1973} 1974EXPORT_SYMBOL(sock_no_shutdown); 1975 1976int sock_no_setsockopt(struct socket *sock, int level, int optname, 1977 char __user *optval, unsigned int optlen) 1978{ 1979 return -EOPNOTSUPP; 1980} 1981EXPORT_SYMBOL(sock_no_setsockopt); 1982 1983int sock_no_getsockopt(struct socket *sock, int level, int optname, 1984 char __user *optval, int __user *optlen) 1985{ 1986 return -EOPNOTSUPP; 1987} 1988EXPORT_SYMBOL(sock_no_getsockopt); 1989 1990int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1991 size_t len) 1992{ 1993 return -EOPNOTSUPP; 1994} 1995EXPORT_SYMBOL(sock_no_sendmsg); 1996 1997int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1998 size_t len, int flags) 1999{ 2000 return -EOPNOTSUPP; 2001} 2002EXPORT_SYMBOL(sock_no_recvmsg); 2003 2004int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 2005{ 2006 /* Mirror missing mmap method error code */ 2007 return -ENODEV; 2008} 2009EXPORT_SYMBOL(sock_no_mmap); 2010 2011ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) 2012{ 2013 ssize_t res; 2014 struct msghdr msg = {.msg_flags = flags}; 2015 struct kvec iov; 2016 char *kaddr = kmap(page); 2017 iov.iov_base = kaddr + offset; 2018 iov.iov_len = size; 2019 res = kernel_sendmsg(sock, &msg, &iov, 1, size); 2020 kunmap(page); 2021 return res; 2022} 2023EXPORT_SYMBOL(sock_no_sendpage); 2024 2025/* 2026 * Default Socket Callbacks 2027 */ 2028 2029static void sock_def_wakeup(struct sock *sk) 2030{ 2031 struct socket_wq *wq; 2032 2033 rcu_read_lock(); 2034 wq = rcu_dereference(sk->sk_wq); 2035 if (wq_has_sleeper(wq)) 2036 wake_up_interruptible_all(&wq->wait); 2037 rcu_read_unlock(); 2038} 2039 2040static void sock_def_error_report(struct sock *sk) 2041{ 2042 struct socket_wq *wq; 2043 2044 rcu_read_lock(); 2045 wq = rcu_dereference(sk->sk_wq); 2046 if (wq_has_sleeper(wq)) 2047 wake_up_interruptible_poll(&wq->wait, POLLERR); 2048 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 2049 rcu_read_unlock(); 2050} 2051 2052static void sock_def_readable(struct sock *sk, int len) 2053{ 2054 struct socket_wq *wq; 2055 2056 rcu_read_lock(); 2057 wq = rcu_dereference(sk->sk_wq); 2058 if (wq_has_sleeper(wq)) 2059 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | 2060 POLLRDNORM | POLLRDBAND); 2061 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 2062 rcu_read_unlock(); 2063} 2064 2065static void sock_def_write_space(struct sock *sk) 2066{ 2067 struct socket_wq *wq; 2068 2069 rcu_read_lock(); 2070 2071 /* Do not wake up a writer until he can make "significant" 2072 * progress. --DaveM 2073 */ 2074 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { 2075 wq = rcu_dereference(sk->sk_wq); 2076 if (wq_has_sleeper(wq)) 2077 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 2078 POLLWRNORM | POLLWRBAND); 2079 2080 /* Should agree with poll, otherwise some programs break */ 2081 if (sock_writeable(sk)) 2082 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 2083 } 2084 2085 rcu_read_unlock(); 2086} 2087 2088static void sock_def_destruct(struct sock *sk) 2089{ 2090 kfree(sk->sk_protinfo); 2091} 2092 2093void sk_send_sigurg(struct sock *sk) 2094{ 2095 if (sk->sk_socket && sk->sk_socket->file) 2096 if (send_sigurg(&sk->sk_socket->file->f_owner)) 2097 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 2098} 2099EXPORT_SYMBOL(sk_send_sigurg); 2100 2101void sk_reset_timer(struct sock *sk, struct timer_list* timer, 2102 unsigned long expires) 2103{ 2104 if (!mod_timer(timer, expires)) 2105 sock_hold(sk); 2106} 2107EXPORT_SYMBOL(sk_reset_timer); 2108 2109void sk_stop_timer(struct sock *sk, struct timer_list* timer) 2110{ 2111 if (timer_pending(timer) && del_timer(timer)) 2112 __sock_put(sk); 2113} 2114EXPORT_SYMBOL(sk_stop_timer); 2115 2116void sock_init_data(struct socket *sock, struct sock *sk) 2117{ 2118 skb_queue_head_init(&sk->sk_receive_queue); 2119 skb_queue_head_init(&sk->sk_write_queue); 2120 skb_queue_head_init(&sk->sk_error_queue); 2121#ifdef CONFIG_NET_DMA 2122 skb_queue_head_init(&sk->sk_async_wait_queue); 2123#endif 2124 2125 sk->sk_send_head = NULL; 2126 2127 init_timer(&sk->sk_timer); 2128 2129 sk->sk_allocation = GFP_KERNEL; 2130 sk->sk_rcvbuf = sysctl_rmem_default; 2131 sk->sk_sndbuf = sysctl_wmem_default; 2132 sk->sk_state = TCP_CLOSE; 2133 sk_set_socket(sk, sock); 2134 2135 sock_set_flag(sk, SOCK_ZAPPED); 2136 2137 if (sock) { 2138 sk->sk_type = sock->type; 2139 sk->sk_wq = sock->wq; 2140 sock->sk = sk; 2141 } else 2142 sk->sk_wq = NULL; 2143 2144 spin_lock_init(&sk->sk_dst_lock); 2145 rwlock_init(&sk->sk_callback_lock); 2146 lockdep_set_class_and_name(&sk->sk_callback_lock, 2147 af_callback_keys + sk->sk_family, 2148 af_family_clock_key_strings[sk->sk_family]); 2149 2150 sk->sk_state_change = sock_def_wakeup; 2151 sk->sk_data_ready = sock_def_readable; 2152 sk->sk_write_space = sock_def_write_space; 2153 sk->sk_error_report = sock_def_error_report; 2154 sk->sk_destruct = sock_def_destruct; 2155 2156 sk->sk_sndmsg_page = NULL; 2157 sk->sk_sndmsg_off = 0; 2158 sk->sk_peek_off = -1; 2159 2160 sk->sk_peer_pid = NULL; 2161 sk->sk_peer_cred = NULL; 2162 sk->sk_write_pending = 0; 2163 sk->sk_rcvlowat = 1; 2164 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 2165 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 2166 2167 sk->sk_stamp = ktime_set(-1L, 0); 2168 2169 /* 2170 * Before updating sk_refcnt, we must commit prior changes to memory 2171 * (Documentation/RCU/rculist_nulls.txt for details) 2172 */ 2173 smp_wmb(); 2174 atomic_set(&sk->sk_refcnt, 1); 2175 atomic_set(&sk->sk_drops, 0); 2176} 2177EXPORT_SYMBOL(sock_init_data); 2178 2179void lock_sock_nested(struct sock *sk, int subclass) 2180{ 2181 might_sleep(); 2182 spin_lock_bh(&sk->sk_lock.slock); 2183 if (sk->sk_lock.owned) 2184 __lock_sock(sk); 2185 sk->sk_lock.owned = 1; 2186 spin_unlock(&sk->sk_lock.slock); 2187 /* 2188 * The sk_lock has mutex_lock() semantics here: 2189 */ 2190 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 2191 local_bh_enable(); 2192} 2193EXPORT_SYMBOL(lock_sock_nested); 2194 2195void release_sock(struct sock *sk) 2196{ 2197 /* 2198 * The sk_lock has mutex_unlock() semantics: 2199 */ 2200 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 2201 2202 spin_lock_bh(&sk->sk_lock.slock); 2203 if (sk->sk_backlog.tail) 2204 __release_sock(sk); 2205 2206 if (sk->sk_prot->release_cb) 2207 sk->sk_prot->release_cb(sk); 2208 2209 sk->sk_lock.owned = 0; 2210 if (waitqueue_active(&sk->sk_lock.wq)) 2211 wake_up(&sk->sk_lock.wq); 2212 spin_unlock_bh(&sk->sk_lock.slock); 2213} 2214EXPORT_SYMBOL(release_sock); 2215 2216/** 2217 * lock_sock_fast - fast version of lock_sock 2218 * @sk: socket 2219 * 2220 * This version should be used for very small section, where process wont block 2221 * return false if fast path is taken 2222 * sk_lock.slock locked, owned = 0, BH disabled 2223 * return true if slow path is taken 2224 * sk_lock.slock unlocked, owned = 1, BH enabled 2225 */ 2226bool lock_sock_fast(struct sock *sk) 2227{ 2228 might_sleep(); 2229 spin_lock_bh(&sk->sk_lock.slock); 2230 2231 if (!sk->sk_lock.owned) 2232 /* 2233 * Note : We must disable BH 2234 */ 2235 return false; 2236 2237 __lock_sock(sk); 2238 sk->sk_lock.owned = 1; 2239 spin_unlock(&sk->sk_lock.slock); 2240 /* 2241 * The sk_lock has mutex_lock() semantics here: 2242 */ 2243 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); 2244 local_bh_enable(); 2245 return true; 2246} 2247EXPORT_SYMBOL(lock_sock_fast); 2248 2249int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 2250{ 2251 struct timeval tv; 2252 if (!sock_flag(sk, SOCK_TIMESTAMP)) 2253 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 2254 tv = ktime_to_timeval(sk->sk_stamp); 2255 if (tv.tv_sec == -1) 2256 return -ENOENT; 2257 if (tv.tv_sec == 0) { 2258 sk->sk_stamp = ktime_get_real(); 2259 tv = ktime_to_timeval(sk->sk_stamp); 2260 } 2261 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; 2262} 2263EXPORT_SYMBOL(sock_get_timestamp); 2264 2265int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) 2266{ 2267 struct timespec ts; 2268 if (!sock_flag(sk, SOCK_TIMESTAMP)) 2269 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 2270 ts = ktime_to_timespec(sk->sk_stamp); 2271 if (ts.tv_sec == -1) 2272 return -ENOENT; 2273 if (ts.tv_sec == 0) { 2274 sk->sk_stamp = ktime_get_real(); 2275 ts = ktime_to_timespec(sk->sk_stamp); 2276 } 2277 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; 2278} 2279EXPORT_SYMBOL(sock_get_timestampns); 2280 2281void sock_enable_timestamp(struct sock *sk, int flag) 2282{ 2283 if (!sock_flag(sk, flag)) { 2284 unsigned long previous_flags = sk->sk_flags; 2285 2286 sock_set_flag(sk, flag); 2287 /* 2288 * we just set one of the two flags which require net 2289 * time stamping, but time stamping might have been on 2290 * already because of the other one 2291 */ 2292 if (!(previous_flags & SK_FLAGS_TIMESTAMP)) 2293 net_enable_timestamp(); 2294 } 2295} 2296 2297/* 2298 * Get a socket option on an socket. 2299 * 2300 * FIX: POSIX 1003.1g is very ambiguous here. It states that 2301 * asynchronous errors should be reported by getsockopt. We assume 2302 * this means if you specify SO_ERROR (otherwise whats the point of it). 2303 */ 2304int sock_common_getsockopt(struct socket *sock, int level, int optname, 2305 char __user *optval, int __user *optlen) 2306{ 2307 struct sock *sk = sock->sk; 2308 2309 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2310} 2311EXPORT_SYMBOL(sock_common_getsockopt); 2312 2313#ifdef CONFIG_COMPAT 2314int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, 2315 char __user *optval, int __user *optlen) 2316{ 2317 struct sock *sk = sock->sk; 2318 2319 if (sk->sk_prot->compat_getsockopt != NULL) 2320 return sk->sk_prot->compat_getsockopt(sk, level, optname, 2321 optval, optlen); 2322 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 2323} 2324EXPORT_SYMBOL(compat_sock_common_getsockopt); 2325#endif 2326 2327int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 2328 struct msghdr *msg, size_t size, int flags) 2329{ 2330 struct sock *sk = sock->sk; 2331 int addr_len = 0; 2332 int err; 2333 2334 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, 2335 flags & ~MSG_DONTWAIT, &addr_len); 2336 if (err >= 0) 2337 msg->msg_namelen = addr_len; 2338 return err; 2339} 2340EXPORT_SYMBOL(sock_common_recvmsg); 2341 2342/* 2343 * Set socket options on an inet socket. 2344 */ 2345int sock_common_setsockopt(struct socket *sock, int level, int optname, 2346 char __user *optval, unsigned int optlen) 2347{ 2348 struct sock *sk = sock->sk; 2349 2350 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2351} 2352EXPORT_SYMBOL(sock_common_setsockopt); 2353 2354#ifdef CONFIG_COMPAT 2355int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, 2356 char __user *optval, unsigned int optlen) 2357{ 2358 struct sock *sk = sock->sk; 2359 2360 if (sk->sk_prot->compat_setsockopt != NULL) 2361 return sk->sk_prot->compat_setsockopt(sk, level, optname, 2362 optval, optlen); 2363 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 2364} 2365EXPORT_SYMBOL(compat_sock_common_setsockopt); 2366#endif 2367 2368void sk_common_release(struct sock *sk) 2369{ 2370 if (sk->sk_prot->destroy) 2371 sk->sk_prot->destroy(sk); 2372 2373 /* 2374 * Observation: when sock_common_release is called, processes have 2375 * no access to socket. But net still has. 2376 * Step one, detach it from networking: 2377 * 2378 * A. Remove from hash tables. 2379 */ 2380 2381 sk->sk_prot->unhash(sk); 2382 2383 /* 2384 * In this point socket cannot receive new packets, but it is possible 2385 * that some packets are in flight because some CPU runs receiver and 2386 * did hash table lookup before we unhashed socket. They will achieve 2387 * receive queue and will be purged by socket destructor. 2388 * 2389 * Also we still have packets pending on receive queue and probably, 2390 * our own packets waiting in device queues. sock_destroy will drain 2391 * receive queue, but transmitted packets will delay socket destruction 2392 * until the last reference will be released. 2393 */ 2394 2395 sock_orphan(sk); 2396 2397 xfrm_sk_free_policy(sk); 2398 2399 sk_refcnt_debug_release(sk); 2400 sock_put(sk); 2401} 2402EXPORT_SYMBOL(sk_common_release); 2403 2404#ifdef CONFIG_PROC_FS 2405#define PROTO_INUSE_NR 64 /* should be enough for the first time */ 2406struct prot_inuse { 2407 int val[PROTO_INUSE_NR]; 2408}; 2409 2410static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 2411 2412#ifdef CONFIG_NET_NS 2413void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2414{ 2415 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); 2416} 2417EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2418 2419int sock_prot_inuse_get(struct net *net, struct proto *prot) 2420{ 2421 int cpu, idx = prot->inuse_idx; 2422 int res = 0; 2423 2424 for_each_possible_cpu(cpu) 2425 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; 2426 2427 return res >= 0 ? res : 0; 2428} 2429EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2430 2431static int __net_init sock_inuse_init_net(struct net *net) 2432{ 2433 net->core.inuse = alloc_percpu(struct prot_inuse); 2434 return net->core.inuse ? 0 : -ENOMEM; 2435} 2436 2437static void __net_exit sock_inuse_exit_net(struct net *net) 2438{ 2439 free_percpu(net->core.inuse); 2440} 2441 2442static struct pernet_operations net_inuse_ops = { 2443 .init = sock_inuse_init_net, 2444 .exit = sock_inuse_exit_net, 2445}; 2446 2447static __init int net_inuse_init(void) 2448{ 2449 if (register_pernet_subsys(&net_inuse_ops)) 2450 panic("Cannot initialize net inuse counters"); 2451 2452 return 0; 2453} 2454 2455core_initcall(net_inuse_init); 2456#else 2457static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); 2458 2459void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2460{ 2461 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); 2462} 2463EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2464 2465int sock_prot_inuse_get(struct net *net, struct proto *prot) 2466{ 2467 int cpu, idx = prot->inuse_idx; 2468 int res = 0; 2469 2470 for_each_possible_cpu(cpu) 2471 res += per_cpu(prot_inuse, cpu).val[idx]; 2472 2473 return res >= 0 ? res : 0; 2474} 2475EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2476#endif 2477 2478static void assign_proto_idx(struct proto *prot) 2479{ 2480 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 2481 2482 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 2483 pr_err("PROTO_INUSE_NR exhausted\n"); 2484 return; 2485 } 2486 2487 set_bit(prot->inuse_idx, proto_inuse_idx); 2488} 2489 2490static void release_proto_idx(struct proto *prot) 2491{ 2492 if (prot->inuse_idx != PROTO_INUSE_NR - 1) 2493 clear_bit(prot->inuse_idx, proto_inuse_idx); 2494} 2495#else 2496static inline void assign_proto_idx(struct proto *prot) 2497{ 2498} 2499 2500static inline void release_proto_idx(struct proto *prot) 2501{ 2502} 2503#endif 2504 2505int proto_register(struct proto *prot, int alloc_slab) 2506{ 2507 if (alloc_slab) { 2508 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 2509 SLAB_HWCACHE_ALIGN | prot->slab_flags, 2510 NULL); 2511 2512 if (prot->slab == NULL) { 2513 pr_crit("%s: Can't create sock SLAB cache!\n", 2514 prot->name); 2515 goto out; 2516 } 2517 2518 if (prot->rsk_prot != NULL) { 2519 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); 2520 if (prot->rsk_prot->slab_name == NULL) 2521 goto out_free_sock_slab; 2522 2523 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, 2524 prot->rsk_prot->obj_size, 0, 2525 SLAB_HWCACHE_ALIGN, NULL); 2526 2527 if (prot->rsk_prot->slab == NULL) { 2528 pr_crit("%s: Can't create request sock SLAB cache!\n", 2529 prot->name); 2530 goto out_free_request_sock_slab_name; 2531 } 2532 } 2533 2534 if (prot->twsk_prot != NULL) { 2535 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); 2536 2537 if (prot->twsk_prot->twsk_slab_name == NULL) 2538 goto out_free_request_sock_slab; 2539 2540 prot->twsk_prot->twsk_slab = 2541 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 2542 prot->twsk_prot->twsk_obj_size, 2543 0, 2544 SLAB_HWCACHE_ALIGN | 2545 prot->slab_flags, 2546 NULL); 2547 if (prot->twsk_prot->twsk_slab == NULL) 2548 goto out_free_timewait_sock_slab_name; 2549 } 2550 } 2551 2552 mutex_lock(&proto_list_mutex); 2553 list_add(&prot->node, &proto_list); 2554 assign_proto_idx(prot); 2555 mutex_unlock(&proto_list_mutex); 2556 return 0; 2557 2558out_free_timewait_sock_slab_name: 2559 kfree(prot->twsk_prot->twsk_slab_name); 2560out_free_request_sock_slab: 2561 if (prot->rsk_prot && prot->rsk_prot->slab) { 2562 kmem_cache_destroy(prot->rsk_prot->slab); 2563 prot->rsk_prot->slab = NULL; 2564 } 2565out_free_request_sock_slab_name: 2566 if (prot->rsk_prot) 2567 kfree(prot->rsk_prot->slab_name); 2568out_free_sock_slab: 2569 kmem_cache_destroy(prot->slab); 2570 prot->slab = NULL; 2571out: 2572 return -ENOBUFS; 2573} 2574EXPORT_SYMBOL(proto_register); 2575 2576void proto_unregister(struct proto *prot) 2577{ 2578 mutex_lock(&proto_list_mutex); 2579 release_proto_idx(prot); 2580 list_del(&prot->node); 2581 mutex_unlock(&proto_list_mutex); 2582 2583 if (prot->slab != NULL) { 2584 kmem_cache_destroy(prot->slab); 2585 prot->slab = NULL; 2586 } 2587 2588 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) { 2589 kmem_cache_destroy(prot->rsk_prot->slab); 2590 kfree(prot->rsk_prot->slab_name); 2591 prot->rsk_prot->slab = NULL; 2592 } 2593 2594 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { 2595 kmem_cache_destroy(prot->twsk_prot->twsk_slab); 2596 kfree(prot->twsk_prot->twsk_slab_name); 2597 prot->twsk_prot->twsk_slab = NULL; 2598 } 2599} 2600EXPORT_SYMBOL(proto_unregister); 2601 2602#ifdef CONFIG_PROC_FS 2603static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 2604 __acquires(proto_list_mutex) 2605{ 2606 mutex_lock(&proto_list_mutex); 2607 return seq_list_start_head(&proto_list, *pos); 2608} 2609 2610static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2611{ 2612 return seq_list_next(v, &proto_list, pos); 2613} 2614 2615static void proto_seq_stop(struct seq_file *seq, void *v) 2616 __releases(proto_list_mutex) 2617{ 2618 mutex_unlock(&proto_list_mutex); 2619} 2620 2621static char proto_method_implemented(const void *method) 2622{ 2623 return method == NULL ? 'n' : 'y'; 2624} 2625static long sock_prot_memory_allocated(struct proto *proto) 2626{ 2627 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; 2628} 2629 2630static char *sock_prot_memory_pressure(struct proto *proto) 2631{ 2632 return proto->memory_pressure != NULL ? 2633 proto_memory_pressure(proto) ? "yes" : "no" : "NI"; 2634} 2635 2636static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 2637{ 2638 2639 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 2640 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 2641 proto->name, 2642 proto->obj_size, 2643 sock_prot_inuse_get(seq_file_net(seq), proto), 2644 sock_prot_memory_allocated(proto), 2645 sock_prot_memory_pressure(proto), 2646 proto->max_header, 2647 proto->slab == NULL ? "no" : "yes", 2648 module_name(proto->owner), 2649 proto_method_implemented(proto->close), 2650 proto_method_implemented(proto->connect), 2651 proto_method_implemented(proto->disconnect), 2652 proto_method_implemented(proto->accept), 2653 proto_method_implemented(proto->ioctl), 2654 proto_method_implemented(proto->init), 2655 proto_method_implemented(proto->destroy), 2656 proto_method_implemented(proto->shutdown), 2657 proto_method_implemented(proto->setsockopt), 2658 proto_method_implemented(proto->getsockopt), 2659 proto_method_implemented(proto->sendmsg), 2660 proto_method_implemented(proto->recvmsg), 2661 proto_method_implemented(proto->sendpage), 2662 proto_method_implemented(proto->bind), 2663 proto_method_implemented(proto->backlog_rcv), 2664 proto_method_implemented(proto->hash), 2665 proto_method_implemented(proto->unhash), 2666 proto_method_implemented(proto->get_port), 2667 proto_method_implemented(proto->enter_memory_pressure)); 2668} 2669 2670static int proto_seq_show(struct seq_file *seq, void *v) 2671{ 2672 if (v == &proto_list) 2673 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 2674 "protocol", 2675 "size", 2676 "sockets", 2677 "memory", 2678 "press", 2679 "maxhdr", 2680 "slab", 2681 "module", 2682 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 2683 else 2684 proto_seq_printf(seq, list_entry(v, struct proto, node)); 2685 return 0; 2686} 2687 2688static const struct seq_operations proto_seq_ops = { 2689 .start = proto_seq_start, 2690 .next = proto_seq_next, 2691 .stop = proto_seq_stop, 2692 .show = proto_seq_show, 2693}; 2694 2695static int proto_seq_open(struct inode *inode, struct file *file) 2696{ 2697 return seq_open_net(inode, file, &proto_seq_ops, 2698 sizeof(struct seq_net_private)); 2699} 2700 2701static const struct file_operations proto_seq_fops = { 2702 .owner = THIS_MODULE, 2703 .open = proto_seq_open, 2704 .read = seq_read, 2705 .llseek = seq_lseek, 2706 .release = seq_release_net, 2707}; 2708 2709static __net_init int proto_init_net(struct net *net) 2710{ 2711 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) 2712 return -ENOMEM; 2713 2714 return 0; 2715} 2716 2717static __net_exit void proto_exit_net(struct net *net) 2718{ 2719 proc_net_remove(net, "protocols"); 2720} 2721 2722 2723static __net_initdata struct pernet_operations proto_net_ops = { 2724 .init = proto_init_net, 2725 .exit = proto_exit_net, 2726}; 2727 2728static int __init proto_init(void) 2729{ 2730 return register_pernet_subsys(&proto_net_ops); 2731} 2732 2733subsys_initcall(proto_init); 2734 2735#endif /* PROC_FS */ 2736