sock.c revision e812347ccf9e8ce073b0ba0c49d03b124707b2b4
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
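/*
 * A minimal usage sketch (not part of this file): a kernel-only socket
 * that must keep making progress under memory pressure, for example a
 * block-over-network transport, could opt into the emergency reserves
 * like this. The transport name and callback are hypothetical; only
 * sk_set_memalloc() and sk_clear_memalloc() are real APIs from above.
 *
 *	static int example_transport_open(struct socket *sock)
 *	{
 *		struct sock *sk = sock->sk;
 *
 *		// Allow this socket's allocations to dip into the
 *		// PF_MEMALLOC reserves; pairs with sk_clear_memalloc()
 *		// in the teardown path, once rmem accounting is zero.
 *		sk_set_memalloc(sk);
 *		return 0;
 *	}
 */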
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}
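/*
 * Worked example for the jiffies conversion in sock_set_timeout() above
 * (illustrative only, assuming HZ == 100): a timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } yields
 *
 *	*timeo_p = 1 * HZ + (500000 + (1000000/HZ - 1)) / (1000000/HZ)
 *	         = 100 + 509999 / 10000
 *	         = 150 jiffies
 *
 * i.e. 1.5 seconds, with the microseconds rounded up to a whole tick.
 */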
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
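/*
 * A userspace sketch of the SO_RCVBUF behaviour documented above: the
 * kernel doubles the requested value to cover "struct sk_buff" overhead,
 * and getsockopt() reports the value actually in use. Error handling is
 * omitted; this assumes the request does not exceed sysctl_rmem_max.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out now reads back the doubled value, 131072
 */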
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid(current_ns, cred->euid);
		ucred->gid = from_kgid(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU must leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk, current);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
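/*
 * Minimal allocation sketch, assuming a hypothetical protocol "foo" with
 * its own struct proto foo_prot: this is how sk_alloc()/sk_free() above
 * are typically paired in a protocol's create routine.
 *
 *	struct sock *sk;
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &foo_prot);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	...
 *	// on a later error path, undo the allocation:
 *	sk_free(sk);
 */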
/*
 * Last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
					   af_callback_keys + newsk->sk_family,
					   af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
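/*
 * Usage sketch for the option-memory helpers above (the option structure
 * is hypothetical; the helpers are the real API). sock_kmalloc() charges
 * sk_omem_alloc against sysctl_optmem_max, so the matching free must pass
 * the same size back:
 *
 *	struct foo_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */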
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we can not loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
			allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
			allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
			(allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);
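/*
 * Sketch of how a protocol typically wires these defaults into its
 * struct proto_ops (the protocol and the implemented handlers are
 * hypothetical; the sock_no_* defaults are the real symbols above):
 *
 *	static const struct proto_ops foo_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.release	= foo_release,
 *		.bind		= foo_bind,
 *		// everything foo does not support falls back to a
 *		// default that returns -EOPNOTSUPP (or a sane no-op):
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.ioctl		= sock_no_ioctl,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *	};
 */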
/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
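/*
 * Illustrative sketch (added annotation, not part of the original file):
 * the intended pairing of sk_reset_timer()/sk_stop_timer() with a timer
 * handler that drops the reference sk_reset_timer() takes on the socket.
 * example_timer_handler(), example_arm_timer() and the one-second timeout
 * are hypothetical; a handler that re-arms itself would instead keep the
 * reference alive.
 */
static void __maybe_unused example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* ... protocol-specific work ... */

	sock_put(sk);	/* release the reference held by the timer */
}

static inline void example_arm_timer(struct sock *sk)
{
	setup_timer(&sk->sk_timer, example_timer_handler, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}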
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken:
 *	sk_lock.slock locked, owned = 0, BH disabled
 * and true if the slow path was taken:
 *	sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: on the fast path BH stays disabled;
		 * unlock_sock_fast() re-enables it.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
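/*
 * Illustrative sketch (added annotation, not part of the original file):
 * how lock_sock_fast() pairs with unlock_sock_fast() around a short
 * critical section, similar to what the UDP ioctl path does.
 * example_peek_len() is hypothetical.
 */
static inline int example_peek_len(struct sock *sk)
{
	struct sk_buff *skb;
	bool slow;
	int len = 0;

	slow = lock_sock_fast(sk);
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb)
		len = skb->len;
	unlock_sock_fast(sk, slow);

	return len;
}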
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);
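/*
 * Illustrative sketch (added annotation, not part of the original file):
 * a protocol ioctl handler dispatching SIOCGSTAMP/SIOCGSTAMPNS to the
 * helpers above, mirroring what the inet ioctl path does.
 * example_ioctl() is hypothetical.
 */
static inline int example_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}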
/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network still does.
	 *
	 * Step one: detach it from networking.
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU is
	 * running the receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and will
	 * be purged by the socket destructor.
	 *
	 * Also, we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
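/*
 * Illustrative sketch (added annotation, not part of the original file):
 * a minimal protocol ->close() that defers all generic teardown to
 * sk_common_release(), the pattern used by e.g. raw sockets.
 * example_close() is hypothetical.
 */
static inline void example_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}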
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif
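/*
 * Illustrative sketch (added annotation, not part of the original file):
 * protocols are expected to call sock_prot_inuse_add() from their
 * ->hash()/->unhash() callbacks; the per-CPU sum is what the "sockets"
 * column of /proc/net/protocols reports.  example_unhash() is
 * hypothetical.
 */
static inline void example_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}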
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
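/*
 * Illustrative sketch (added annotation, not part of the original file):
 * typical registration pairing for a protocol module.  "example_proto"
 * is hypothetical; passing alloc_slab=1 makes proto_register() create a
 * dedicated kmem cache sized by .obj_size.
 */
static struct proto example_proto __maybe_unused = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static inline int example_proto_init(void)
{
	return proto_register(&example_proto, 1);
}

static inline void example_proto_exit(void)
{
	proto_unregister(&example_proto);
}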
"no" : "yes", 2666 module_name(proto->owner), 2667 proto_method_implemented(proto->close), 2668 proto_method_implemented(proto->connect), 2669 proto_method_implemented(proto->disconnect), 2670 proto_method_implemented(proto->accept), 2671 proto_method_implemented(proto->ioctl), 2672 proto_method_implemented(proto->init), 2673 proto_method_implemented(proto->destroy), 2674 proto_method_implemented(proto->shutdown), 2675 proto_method_implemented(proto->setsockopt), 2676 proto_method_implemented(proto->getsockopt), 2677 proto_method_implemented(proto->sendmsg), 2678 proto_method_implemented(proto->recvmsg), 2679 proto_method_implemented(proto->sendpage), 2680 proto_method_implemented(proto->bind), 2681 proto_method_implemented(proto->backlog_rcv), 2682 proto_method_implemented(proto->hash), 2683 proto_method_implemented(proto->unhash), 2684 proto_method_implemented(proto->get_port), 2685 proto_method_implemented(proto->enter_memory_pressure)); 2686} 2687 2688static int proto_seq_show(struct seq_file *seq, void *v) 2689{ 2690 if (v == &proto_list) 2691 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 2692 "protocol", 2693 "size", 2694 "sockets", 2695 "memory", 2696 "press", 2697 "maxhdr", 2698 "slab", 2699 "module", 2700 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 2701 else 2702 proto_seq_printf(seq, list_entry(v, struct proto, node)); 2703 return 0; 2704} 2705 2706static const struct seq_operations proto_seq_ops = { 2707 .start = proto_seq_start, 2708 .next = proto_seq_next, 2709 .stop = proto_seq_stop, 2710 .show = proto_seq_show, 2711}; 2712 2713static int proto_seq_open(struct inode *inode, struct file *file) 2714{ 2715 return seq_open_net(inode, file, &proto_seq_ops, 2716 sizeof(struct seq_net_private)); 2717} 2718 2719static const struct file_operations proto_seq_fops = { 2720 .owner = THIS_MODULE, 2721 .open = proto_seq_open, 2722 .read = seq_read, 2723 .llseek = seq_lseek, 2724 .release = seq_release_net, 2725}; 2726 2727static __net_init int proto_init_net(struct net *net) 2728{ 2729 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) 2730 return -ENOMEM; 2731 2732 return 0; 2733} 2734 2735static __net_exit void proto_exit_net(struct net *net) 2736{ 2737 proc_net_remove(net, "protocols"); 2738} 2739 2740 2741static __net_initdata struct pernet_operations proto_net_ops = { 2742 .init = proto_init_net, 2743 .exit = proto_exit_net, 2744}; 2745 2746static int __init proto_init(void) 2747{ 2748 return register_pernet_subsys(&proto_net_ops); 2749} 2750 2751subsys_initcall(proto_init); 2752 2753#endif /* PROC_FS */ 2754