/* sock.c revision 25cc4ae913a46bcc11b03c37bec59568f2122a36 */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
	"sk_lock-AF_AX25", "sk_lock-AF_IPX", "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE", "sk_lock-AF_ATMPVC",
	"sk_lock-AF_X25", "sk_lock-AF_INET6", "sk_lock-AF_ROSE",
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI", "sk_lock-AF_SECURITY",
	"sk_lock-AF_KEY", "sk_lock-AF_NETLINK", "sk_lock-AF_PACKET",
	"sk_lock-AF_ASH", "sk_lock-AF_ECONET", "sk_lock-AF_ATMSVC",
	"sk_lock-AF_RDS", "sk_lock-AF_SNA", "sk_lock-AF_IRDA",
	"sk_lock-AF_PPPOX", "sk_lock-AF_WANPIPE", "sk_lock-AF_LLC",
	"sk_lock-27", "sk_lock-28", "sk_lock-AF_CAN",
	"sk_lock-AF_TIPC", "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV",
	"sk_lock-AF_RXRPC", "sk_lock-AF_ISDN", "sk_lock-AF_PHONET",
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF", "sk_lock-AF_ALG",
	"sk_lock-AF_NFC", "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
	"slock-AF_AX25", "slock-AF_IPX", "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE", "slock-AF_ATMPVC",
	"slock-AF_X25", "slock-AF_INET6", "slock-AF_ROSE",
	"slock-AF_DECnet", "slock-AF_NETBEUI", "slock-AF_SECURITY",
	"slock-AF_KEY", "slock-AF_NETLINK", "slock-AF_PACKET",
	"slock-AF_ASH", "slock-AF_ECONET", "slock-AF_ATMSVC",
	"slock-AF_RDS", "slock-AF_SNA", "slock-AF_IRDA",
	"slock-AF_PPPOX", "slock-AF_WANPIPE", "slock-AF_LLC",
	"slock-27", "slock-28", "slock-AF_CAN",
	"slock-AF_TIPC", "slock-AF_BLUETOOTH", "slock-AF_IUCV",
	"slock-AF_RXRPC", "slock-AF_ISDN", "slock-AF_PHONET",
	"slock-AF_IEEE802154", "slock-AF_CAIF", "slock-AF_ALG",
	"slock-AF_NFC", "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX", "clock-AF_INET",
	"clock-AF_AX25", "clock-AF_IPX", "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE", "clock-AF_ATMPVC",
	"clock-AF_X25", "clock-AF_INET6", "clock-AF_ROSE",
	"clock-AF_DECnet", "clock-AF_NETBEUI", "clock-AF_SECURITY",
	"clock-AF_KEY", "clock-AF_NETLINK", "clock-AF_PACKET",
	"clock-AF_ASH", "clock-AF_ECONET", "clock-AF_ATMSVC",
	"clock-AF_RDS", "clock-AF_SNA", "clock-AF_IRDA",
	"clock-AF_PPPOX", "clock-AF_WANPIPE", "clock-AF_LLC",
	"clock-27", "clock-28", "clock-AF_CAN",
	"clock-AF_TIPC", "clock-AF_BLUETOOTH", "clock-AF_IUCV",
	"clock-AF_RXRPC", "clock-AF_ISDN", "clock-AF_PHONET",
	"clock-AF_IEEE802154", "clock-AF_CAIF", "clock-AF_ALG",
	"clock-AF_NFC", "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
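/*
 * Illustrative sketch (not from this file): a subsystem that swaps over
 * the network, e.g. an NBD-style driver, might flag its transport socket
 * so its traffic can dip into the emergency reserves; "swap_sock" is a
 * hypothetical name.
 *
 *	struct socket *swap_sock;
 *
 *	if (sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP,
 *			     &swap_sock) == 0)
 *		sk_set_memalloc(swap_sock->sk);	// may now use reserves
 *	...
 *	sk_clear_memalloc(swap_sock->sk);	// before normal teardown
 */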
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
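/*
 * Worked example for the conversion above, assuming HZ == 1000:
 * tv = { .tv_sec = 2, .tv_usec = 500000 } gives
 * *timeo_p = 2 * 1000 + (500000 + 999) / 1000 = 2500 jiffies,
 * i.e. the microseconds are rounded up to the next whole tick.
 */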
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
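/*
 * Illustrative sketch (not from this file): a typical protocol rcv path
 * hands demultiplexed skbs to the socket with sock_queue_rcv_skb() and
 * frees them itself on failure; "myproto_rcv" is a hypothetical name.
 *
 *	static int myproto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			// -ENOMEM/-ENOBUFS: receive queue or rmem full
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */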
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	struct net_device *dev;
	char devname[IFNAMSIZ];
	unsigned seq;

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

retry:
	seq = read_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
	ret = -ENODEV;
	if (!dev) {
		rcu_read_unlock();
		goto out;
	}

	strcpy(devname, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq))
		goto retry;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
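/*
 * Illustrative sketch (userspace, not part of this file): because of the
 * doubling in the SO_RCVBUF path above, a getsockopt() readback returns
 * twice the requested value, assuming sysctl_rmem_max is not hit:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out is now 131072, covering struct sk_buff etc. overhead
 */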
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}
/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of
 * nulls nodes unmodified. Special care is taken when initializing the
 * object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk, struct task_struct *task)
{
	u32 classid;

	classid = task_cls_classid(task);
	if (classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
#endif
/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk, current);
		sock_update_netprioidx(sk, current);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
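/*
 * Illustrative sketch (not from this file): option memory must be
 * returned with the same size it was charged with, so sk_omem_alloc
 * balances out; "struct my_opt" is a hypothetical type.
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));	// same size as the charge
 */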
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
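/*
 * Illustrative sketch (not from this file): a datagram sendmsg()
 * implementation commonly blocks for wmem via sock_alloc_send_skb() and
 * then copies the user payload; "reserve" and error handling are
 * abbreviated and hypothetical.
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	// err holds -EAGAIN, -EPIPE, ...
 *	skb_reserve(skb, reserve);
 *	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 */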
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset < pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	/* We restrict high order allocations to users that can afford to wait */
	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;

	do {
		gfp_t gfp = sk->sk_allocation;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
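/*
 * Illustrative sketch (not from this file): callers carve their copy out
 * of the per-socket frag after a successful refill and advance the
 * offset; "data" and "copy" are hypothetical.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -EAGAIN;	// under memory pressure
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;
 */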
static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
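/*
 * Illustrative sketch (not from this file): a blocking recvmsg() loop
 * built on sk_wait_data(); locking and error paths are abbreviated.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	struct sk_buff *skb;
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */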
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
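/*
 * Example (illustrative sketch): a minimal address family that implements
 * only its own release/sendmsg/recvmsg can fill the remaining proto_ops
 * slots with the sock_no_*() stubs above. "example_ops" and PF_UNSPEC are
 * hypothetical placeholders for the family's own table and PF_* value.
 */
static const struct proto_ops example_ops __maybe_unused = {
	.family		= PF_UNSPEC,	/* a real family uses its PF_* value */
	.owner		= THIS_MODULE,
	/* .release, .sendmsg and .recvmsg would be the protocol's own */
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};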
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
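/*
 * Example (illustrative sketch): the usual pattern in an address family's
 * create() handler. example_create() is hypothetical and PF_UNSPEC stands
 * in for the family's own value; sk_alloc() and sock_init_data() are the
 * real entry points.
 */
static int __maybe_unused example_create(struct net *net, struct socket *sock,
					 struct proto *prot)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, prot);
	if (!sk)
		return -ENOMEM;

	/* queues, timers and the sock_def_*() callbacks set up above */
	sock_init_data(sock, sk);
	/* protocol-specific field initialisation would follow here */
	return 0;
}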
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: the fast path returns with BH still disabled;
		 * unlock_sock_fast() re-enables them.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
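/*
 * Example (illustrative sketch): the intended calling pattern for
 * lock_sock_fast(). The returned bool must be handed back to
 * unlock_sock_fast() so it knows which path to undo;
 * example_fast_section() itself is hypothetical.
 */
static void __maybe_unused example_fast_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking work on the socket goes here */

	unlock_sock_fast(sk, slow);
}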
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one.
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}
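/*
 * Example (illustrative sketch): protocols answer the SIOCGSTAMP and
 * SIOCGSTAMPNS ioctls with the helpers above, as the inet ioctl paths do.
 * example_ioctl() is a hypothetical handler.
 */
static int __maybe_unused example_ioctl(struct socket *sock, unsigned int cmd,
					unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}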
/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
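/*
 * Example (illustrative sketch): families such as inet simply route the
 * generic socket calls to sk->sk_prot through the sock_common_*() helpers
 * above. "example_stream_ops" is a hypothetical table; inet_stream_ops
 * uses the same wiring.
 */
static const struct proto_ops example_stream_ops __maybe_unused = {
	.family		= PF_UNSPEC,	/* placeholder for a real PF_* value */
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,
	/* remaining methods elided; see the sock_no_*() defaults above */
};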
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket. But the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
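/*
 * Example (illustrative sketch): protocols without special teardown needs
 * can point their struct proto .close at a thin wrapper that ends in
 * sk_common_release(), much as several datagram protocols do.
 * example_close() is hypothetical.
 */
static void __maybe_unused example_close(struct sock *sk, long timeout)
{
	/* protocol-private shutdown work would precede this call */
	sk_common_release(sk);
}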
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
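/*
 * Example (illustrative sketch): typical registration from a protocol
 * module's init/exit paths. "example_prot" and its fields are hypothetical
 * placeholders; a real protocol passes its own sock structure size.
 */
static struct proto example_prot __maybe_unused = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* a real proto uses its own type */
};

static int __maybe_unused example_proto_init(void)
{
	/* alloc_slab = 1 creates the per-protocol kmem cache */
	return proto_register(&example_prot, 1);
}

static void __maybe_unused example_proto_exit(void)
{
	proto_unregister(&example_prot);
}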
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= proto_seq_next,
	.stop	= proto_seq_stop,
	.show	= proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */
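/*
 * Example (illustrative sketch): the per-namespace "protocols" seq file
 * registered by proto_init_net() above is plain text, so a userspace
 * reader is trivial. Shown inside this comment because it is ordinary
 * userspace C, not kernel code:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/protocols", "r");
 *
 *		if (!f) {
 *			perror("fopen");
 *			return 1;
 *		}
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		return fclose(f) ? 1 : 0;
 *	}
 */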