1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 * 20 * Fixes: 21 * Alan Cox : Numerous verify_area() calls 22 * Alan Cox : Set the ACK bit on a reset 23 * Alan Cox : Stopped it crashing if it closed while 24 * sk->inuse=1 and was trying to connect 25 * (tcp_err()). 26 * Alan Cox : All icmp error handling was broken 27 * pointers passed where wrong and the 28 * socket was looked up backwards. Nobody 29 * tested any icmp error code obviously. 30 * Alan Cox : tcp_err() now handled properly. It 31 * wakes people on errors. poll 32 * behaves and the icmp error race 33 * has gone by moving it into sock.c 34 * Alan Cox : tcp_send_reset() fixed to work for 35 * everything not just packets for 36 * unknown sockets. 37 * Alan Cox : tcp option processing. 38 * Alan Cox : Reset tweaked (still not 100%) [Had 39 * syn rule wrong] 40 * Herp Rosmanith : More reset fixes 41 * Alan Cox : No longer acks invalid rst frames. 42 * Acking any kind of RST is right out. 43 * Alan Cox : Sets an ignore me flag on an rst 44 * receive otherwise odd bits of prattle 45 * escape still 46 * Alan Cox : Fixed another acking RST frame bug. 47 * Should stop LAN workplace lockups. 48 * Alan Cox : Some tidyups using the new skb list 49 * facilities 50 * Alan Cox : sk->keepopen now seems to work 51 * Alan Cox : Pulls options out correctly on accepts 52 * Alan Cox : Fixed assorted sk->rqueue->next errors 53 * Alan Cox : PSH doesn't end a TCP read. Switched a 54 * bit to skb ops. 55 * Alan Cox : Tidied tcp_data to avoid a potential 56 * nasty. 57 * Alan Cox : Added some better commenting, as the 58 * tcp is hard to follow 59 * Alan Cox : Removed incorrect check for 20 * psh 60 * Michael O'Reilly : ack < copied bug fix. 61 * Johannes Stille : Misc tcp fixes (not all in yet). 62 * Alan Cox : FIN with no memory -> CRASH 63 * Alan Cox : Added socket option proto entries. 64 * Also added awareness of them to accept. 65 * Alan Cox : Added TCP options (SOL_TCP) 66 * Alan Cox : Switched wakeup calls to callbacks, 67 * so the kernel can layer network 68 * sockets. 69 * Alan Cox : Use ip_tos/ip_ttl settings. 70 * Alan Cox : Handle FIN (more) properly (we hope). 71 * Alan Cox : RST frames sent on unsynchronised 72 * state ack error. 73 * Alan Cox : Put in missing check for SYN bit. 74 * Alan Cox : Added tcp_select_window() aka NET2E 75 * window non shrink trick. 
76 * Alan Cox : Added a couple of small NET2E timer 77 * fixes 78 * Charles Hedrick : TCP fixes 79 * Toomas Tamm : TCP window fixes 80 * Alan Cox : Small URG fix to rlogin ^C ack fight 81 * Charles Hedrick : Rewrote most of it to actually work 82 * Linus : Rewrote tcp_read() and URG handling 83 * completely 84 * Gerhard Koerting: Fixed some missing timer handling 85 * Matthew Dillon : Reworked TCP machine states as per RFC 86 * Gerhard Koerting: PC/TCP workarounds 87 * Adam Caldwell : Assorted timer/timing errors 88 * Matthew Dillon : Fixed another RST bug 89 * Alan Cox : Move to kernel side addressing changes. 90 * Alan Cox : Beginning work on TCP fastpathing 91 * (not yet usable) 92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 93 * Alan Cox : TCP fast path debugging 94 * Alan Cox : Window clamping 95 * Michael Riepe : Bug in tcp_check() 96 * Matt Dillon : More TCP improvements and RST bug fixes 97 * Matt Dillon : Yet more small nasties remove from the 98 * TCP code (Be very nice to this man if 99 * tcp finally works 100%) 8) 100 * Alan Cox : BSD accept semantics. 101 * Alan Cox : Reset on closedown bug. 102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 103 * Michael Pall : Handle poll() after URG properly in 104 * all cases. 105 * Michael Pall : Undo the last fix in tcp_read_urg() 106 * (multi URG PUSH broke rlogin). 107 * Michael Pall : Fix the multi URG PUSH problem in 108 * tcp_readable(), poll() after URG 109 * works now. 110 * Michael Pall : recv(...,MSG_OOB) never blocks in the 111 * BSD api. 112 * Alan Cox : Changed the semantics of sk->socket to 113 * fix a race and a signal problem with 114 * accept() and async I/O. 115 * Alan Cox : Relaxed the rules on tcp_sendto(). 116 * Yury Shevchuk : Really fixed accept() blocking problem. 117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 118 * clients/servers which listen in on 119 * fixed ports. 120 * Alan Cox : Cleaned the above up and shrank it to 121 * a sensible code size. 122 * Alan Cox : Self connect lockup fix. 123 * Alan Cox : No connect to multicast. 124 * Ross Biro : Close unaccepted children on master 125 * socket close. 126 * Alan Cox : Reset tracing code. 127 * Alan Cox : Spurious resets on shutdown. 128 * Alan Cox : Giant 15 minute/60 second timer error 129 * Alan Cox : Small whoops in polling before an 130 * accept. 131 * Alan Cox : Kept the state trace facility since 132 * it's handy for debugging. 133 * Alan Cox : More reset handler fixes. 134 * Alan Cox : Started rewriting the code based on 135 * the RFC's for other useful protocol 136 * references see: Comer, KA9Q NOS, and 137 * for a reference on the difference 138 * between specifications and how BSD 139 * works see the 4.4lite source. 140 * A.N.Kuznetsov : Don't time wait on completion of tidy 141 * close. 142 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 143 * Linus Torvalds : Fixed BSD port reuse to work first syn 144 * Alan Cox : Reimplemented timers as per the RFC 145 * and using multiple timers for sanity. 146 * Alan Cox : Small bug fixes, and a lot of new 147 * comments. 148 * Alan Cox : Fixed dual reader crash by locking 149 * the buffers (much like datagram.c) 150 * Alan Cox : Fixed stuck sockets in probe. A probe 151 * now gets fed up of retrying without 152 * (even a no space) answer. 153 * Alan Cox : Extracted closing code better 154 * Alan Cox : Fixed the closing state machine to 155 * resemble the RFC. 156 * Alan Cox : More 'per spec' fixes. 157 * Jorge Cwik : Even faster checksumming. 
158 * Alan Cox : tcp_data() doesn't ack illegal PSH 159 * only frames. At least one pc tcp stack 160 * generates them. 161 * Alan Cox : Cache last socket. 162 * Alan Cox : Per route irtt. 163 * Matt Day : poll()->select() match BSD precisely on error 164 * Alan Cox : New buffers 165 * Marc Tamsky : Various sk->prot->retransmits and 166 * sk->retransmits misupdating fixed. 167 * Fixed tcp_write_timeout: stuck close, 168 * and TCP syn retries gets used now. 169 * Mark Yarvis : In tcp_read_wakeup(), don't send an 170 * ack if state is TCP_CLOSED. 171 * Alan Cox : Look up device on a retransmit - routes may 172 * change. Doesn't yet cope with MSS shrink right 173 * but it's a start! 174 * Marc Tamsky : Closing in closing fixes. 175 * Mike Shaver : RFC1122 verifications. 176 * Alan Cox : rcv_saddr errors. 177 * Alan Cox : Block double connect(). 178 * Alan Cox : Small hooks for enSKIP. 179 * Alexey Kuznetsov: Path MTU discovery. 180 * Alan Cox : Support soft errors. 181 * Alan Cox : Fix MTU discovery pathological case 182 * when the remote claims no mtu! 183 * Marc Tamsky : TCP_CLOSE fix. 184 * Colin (G3TNE) : Send a reset on syn ack replies in 185 * window but wrong (fixes NT lpd problems) 186 * Pedro Roque : Better TCP window handling, delayed ack. 187 * Joerg Reuter : No modification of locked buffers in 188 * tcp_do_retransmit() 189 * Eric Schenk : Changed receiver side silly window 190 * avoidance algorithm to BSD style 191 * algorithm. This doubles throughput 192 * against machines running Solaris, 193 * and seems to result in general 194 * improvement. 195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 196 * Willy Konynenberg : Transparent proxying support. 197 * Mike McLagan : Routing by source 198 * Keith Owens : Do proper merging with partial SKB's in 199 * tcp_do_sendmsg to avoid burstiness. 200 * Eric Schenk : Fix fast close down bug with 201 * shutdown() followed by close(). 202 * Andi Kleen : Make poll agree with SIGIO 203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 204 * lingertime == 0 (RFC 793 ABORT Call) 205 * Hirokazu Takahashi : Use copy_from_user() instead of 206 * csum_and_copy_from_user() if possible. 207 * 208 * This program is free software; you can redistribute it and/or 209 * modify it under the terms of the GNU General Public License 210 * as published by the Free Software Foundation; either version 211 * 2 of the License, or(at your option) any later version. 212 * 213 * Description of States: 214 * 215 * TCP_SYN_SENT sent a connection request, waiting for ack 216 * 217 * TCP_SYN_RECV received a connection request, sent ack, 218 * waiting for final ack in three-way handshake. 219 * 220 * TCP_ESTABLISHED connection established 221 * 222 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 223 * transmission of remaining buffered data 224 * 225 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 226 * to shutdown 227 * 228 * TCP_CLOSING both sides have shutdown but we still have 229 * data we have to finish sending 230 * 231 * TCP_TIME_WAIT timeout to catch resent junk before entering 232 * closed, can only be entered from FIN_WAIT2 233 * or CLOSING. 
Required because the other end 234 * may not have gotten our last ACK causing it 235 * to retransmit the data packet (which we ignore) 236 * 237 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for 238 * us to finish writing our data and to shutdown 239 * (we have to close() to move on to LAST_ACK) 240 * 241 * TCP_LAST_ACK out side has shutdown after remote has 242 * shutdown. There may still be data in our 243 * buffer that we have to finish sending 244 * 245 * TCP_CLOSE socket is finished 246 */ 247 248#define pr_fmt(fmt) "TCP: " fmt 249 250#include <linux/kernel.h> 251#include <linux/module.h> 252#include <linux/types.h> 253#include <linux/fcntl.h> 254#include <linux/poll.h> 255#include <linux/init.h> 256#include <linux/fs.h> 257#include <linux/skbuff.h> 258#include <linux/scatterlist.h> 259#include <linux/splice.h> 260#include <linux/net.h> 261#include <linux/socket.h> 262#include <linux/random.h> 263#include <linux/bootmem.h> 264#include <linux/highmem.h> 265#include <linux/swap.h> 266#include <linux/cache.h> 267#include <linux/err.h> 268#include <linux/crypto.h> 269#include <linux/time.h> 270#include <linux/slab.h> 271#include <linux/uid_stat.h> 272 273#include <net/icmp.h> 274#include <net/inet_common.h> 275#include <net/tcp.h> 276#include <net/xfrm.h> 277#include <net/ip.h> 278#include <net/ip6_route.h> 279#include <net/ipv6.h> 280#include <net/transp_v6.h> 281#include <net/netdma.h> 282#include <net/sock.h> 283 284#include <asm/uaccess.h> 285#include <asm/ioctls.h> 286 287int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; 288 289struct percpu_counter tcp_orphan_count; 290EXPORT_SYMBOL_GPL(tcp_orphan_count); 291 292int sysctl_tcp_wmem[3] __read_mostly; 293int sysctl_tcp_rmem[3] __read_mostly; 294 295EXPORT_SYMBOL(sysctl_tcp_rmem); 296EXPORT_SYMBOL(sysctl_tcp_wmem); 297 298atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ 299EXPORT_SYMBOL(tcp_memory_allocated); 300 301/* 302 * Current number of TCP sockets. 303 */ 304struct percpu_counter tcp_sockets_allocated; 305EXPORT_SYMBOL(tcp_sockets_allocated); 306 307/* 308 * TCP splice context 309 */ 310struct tcp_splice_state { 311 struct pipe_inode_info *pipe; 312 size_t len; 313 unsigned int flags; 314}; 315 316/* 317 * Pressure flag: try to collapse. 318 * Technical note: it is used by multiple contexts non atomically. 319 * All the __sk_mem_schedule() is of this nature: accounting 320 * is strict, actions are advisory and have some latency. 
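 *
 * Illustrative note (not part of the original file): the accounting
 * described above is driven by the usual TCP memory sysctls.  The
 * sysctl_tcp_rmem[]/sysctl_tcp_wmem[] arrays declared above hold
 * {min, default, max} in bytes, while net.ipv4.tcp_mem is in pages
 * as {low, pressure, high}; hypothetical values from user space:
 *
 *	sysctl -w net.ipv4.tcp_mem="94500 126000 189000"
 *	sysctl -w net.ipv4.tcp_rmem="4096 87380 6291456"
 *	sysctl -w net.ipv4.tcp_wmem="4096 16384 4194304"
 *
 * tcp_init_sock() below seeds sk_rcvbuf/sk_sndbuf from slot [1]
 * (the defaults) of these arrays.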
321 */ 322int tcp_memory_pressure __read_mostly; 323EXPORT_SYMBOL(tcp_memory_pressure); 324 325void tcp_enter_memory_pressure(struct sock *sk) 326{ 327 if (!tcp_memory_pressure) { 328 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); 329 tcp_memory_pressure = 1; 330 } 331} 332EXPORT_SYMBOL(tcp_enter_memory_pressure); 333 334/* Convert seconds to retransmits based on initial and max timeout */ 335static u8 secs_to_retrans(int seconds, int timeout, int rto_max) 336{ 337 u8 res = 0; 338 339 if (seconds > 0) { 340 int period = timeout; 341 342 res = 1; 343 while (seconds > period && res < 255) { 344 res++; 345 timeout <<= 1; 346 if (timeout > rto_max) 347 timeout = rto_max; 348 period += timeout; 349 } 350 } 351 return res; 352} 353 354/* Convert retransmits to seconds based on initial and max timeout */ 355static int retrans_to_secs(u8 retrans, int timeout, int rto_max) 356{ 357 int period = 0; 358 359 if (retrans > 0) { 360 period = timeout; 361 while (--retrans) { 362 timeout <<= 1; 363 if (timeout > rto_max) 364 timeout = rto_max; 365 period += timeout; 366 } 367 } 368 return period; 369} 370 371/* Address-family independent initialization for a tcp_sock. 372 * 373 * NOTE: A lot of things set to zero explicitly by call to 374 * sk_alloc() so need not be done here. 375 */ 376void tcp_init_sock(struct sock *sk) 377{ 378 struct inet_connection_sock *icsk = inet_csk(sk); 379 struct tcp_sock *tp = tcp_sk(sk); 380 381 skb_queue_head_init(&tp->out_of_order_queue); 382 tcp_init_xmit_timers(sk); 383 tcp_prequeue_init(tp); 384 INIT_LIST_HEAD(&tp->tsq_node); 385 386 icsk->icsk_rto = TCP_TIMEOUT_INIT; 387 tp->mdev = TCP_TIMEOUT_INIT; 388 389 /* So many TCP implementations out there (incorrectly) count the 390 * initial SYN frame in their delayed-ACK and congestion control 391 * algorithms that we must have the following bandaid to talk 392 * efficiently to them. -DaveM 393 */ 394 tp->snd_cwnd = TCP_INIT_CWND; 395 396 /* See draft-stevens-tcpca-spec-01 for discussion of the 397 * initialization of these values. 398 */ 399 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 400 tp->snd_cwnd_clamp = ~0; 401 tp->mss_cache = TCP_MSS_DEFAULT; 402 403 tp->reordering = sysctl_tcp_reordering; 404 tcp_enable_early_retrans(tp); 405 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 406 407 tp->tsoffset = 0; 408 409 sk->sk_state = TCP_CLOSE; 410 411 sk->sk_write_space = sk_stream_write_space; 412 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 413 414 icsk->icsk_sync_mss = tcp_sync_mss; 415 416 /* Presumed zeroed, in order of appearance: 417 * cookie_in_always, cookie_out_never, 418 * s_data_constant, s_data_in, s_data_out 419 */ 420 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 421 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 422 423 local_bh_disable(); 424 sock_update_memcg(sk); 425 sk_sockets_allocated_inc(sk); 426 local_bh_enable(); 427} 428EXPORT_SYMBOL(tcp_init_sock); 429 430/* 431 * Wait for a TCP event. 432 * 433 * Note that we don't need to lock the socket, as the upper poll layers 434 * take care of normal races (between the test and the event) and we don't 435 * go look at any of the socket buffers directly. 436 */ 437unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 438{ 439 unsigned int mask; 440 struct sock *sk = sock->sk; 441 const struct tcp_sock *tp = tcp_sk(sk); 442 443 sock_poll_wait(file, sk_sleep(sk), wait); 444 if (sk->sk_state == TCP_LISTEN) 445 return inet_csk_listen_poll(sk); 446 447 /* Socket is not locked. 
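 * (Illustrative sketch, not from the original source: from user space
 * this is the path behind a plain poll()/select() on a connected TCP
 * socket, e.g.
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		;	/* data, the urgent mark or a FIN is pending */
 *
 * with tcp_fd/timeout_ms supplied by the caller; the mask computed
 * below decides which revents bits come back.)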
We are protected from async events 448 * by poll logic and correct handling of state changes 449 * made by other threads is impossible in any case. 450 */ 451 452 mask = 0; 453 454 /* 455 * POLLHUP is certainly not done right. But poll() doesn't 456 * have a notion of HUP in just one direction, and for a 457 * socket the read side is more interesting. 458 * 459 * Some poll() documentation says that POLLHUP is incompatible 460 * with the POLLOUT/POLLWR flags, so somebody should check this 461 * all. But careful, it tends to be safer to return too many 462 * bits than too few, and you can easily break real applications 463 * if you don't tell them that something has hung up! 464 * 465 * Check-me. 466 * 467 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and 468 * our fs/select.c). It means that after we received EOF, 469 * poll always returns immediately, making impossible poll() on write() 470 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP 471 * if and only if shutdown has been made in both directions. 472 * Actually, it is interesting to look how Solaris and DUX 473 * solve this dilemma. I would prefer, if POLLHUP were maskable, 474 * then we could set it on SND_SHUTDOWN. BTW examples given 475 * in Stevens' books assume exactly this behaviour, it explains 476 * why POLLHUP is incompatible with POLLOUT. --ANK 477 * 478 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 479 * blocking on fresh not-connected or disconnected socket. --ANK 480 */ 481 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 482 mask |= POLLHUP; 483 if (sk->sk_shutdown & RCV_SHUTDOWN) 484 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 485 486 /* Connected or passive Fast Open socket? */ 487 if (sk->sk_state != TCP_SYN_SENT && 488 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { 489 int target = sock_rcvlowat(sk, 0, INT_MAX); 490 491 if (tp->urg_seq == tp->copied_seq && 492 !sock_flag(sk, SOCK_URGINLINE) && 493 tp->urg_data) 494 target++; 495 496 /* Potential race condition. If read of tp below will 497 * escape above sk->sk_state, we can be illegally awaken 498 * in SYN_* states. */ 499 if (tp->rcv_nxt - tp->copied_seq >= target) 500 mask |= POLLIN | POLLRDNORM; 501 502 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 503 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 504 mask |= POLLOUT | POLLWRNORM; 505 } else { /* send SIGIO later */ 506 set_bit(SOCK_ASYNC_NOSPACE, 507 &sk->sk_socket->flags); 508 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 509 510 /* Race breaker. If space is freed after 511 * wspace test but before the flags are set, 512 * IO signal will be lost. 
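 * (Hence the second sk_stream_wspace() check immediately below:
 * we set the NOSPACE bits first and only then re-test, so space
 * freed in the window between the two tests is still reported as
 * POLLOUT instead of being missed.)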
513 */ 514 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 515 mask |= POLLOUT | POLLWRNORM; 516 } 517 } else 518 mask |= POLLOUT | POLLWRNORM; 519 520 if (tp->urg_data & TCP_URG_VALID) 521 mask |= POLLPRI; 522 } 523 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 524 smp_rmb(); 525 if (sk->sk_err) 526 mask |= POLLERR; 527 528 return mask; 529} 530EXPORT_SYMBOL(tcp_poll); 531 532int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) 533{ 534 struct tcp_sock *tp = tcp_sk(sk); 535 int answ; 536 bool slow; 537 538 switch (cmd) { 539 case SIOCINQ: 540 if (sk->sk_state == TCP_LISTEN) 541 return -EINVAL; 542 543 slow = lock_sock_fast(sk); 544 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 545 answ = 0; 546 else if (sock_flag(sk, SOCK_URGINLINE) || 547 !tp->urg_data || 548 before(tp->urg_seq, tp->copied_seq) || 549 !before(tp->urg_seq, tp->rcv_nxt)) { 550 551 answ = tp->rcv_nxt - tp->copied_seq; 552 553 /* Subtract 1, if FIN was received */ 554 if (answ && sock_flag(sk, SOCK_DONE)) 555 answ--; 556 } else 557 answ = tp->urg_seq - tp->copied_seq; 558 unlock_sock_fast(sk, slow); 559 break; 560 case SIOCATMARK: 561 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 562 break; 563 case SIOCOUTQ: 564 if (sk->sk_state == TCP_LISTEN) 565 return -EINVAL; 566 567 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 568 answ = 0; 569 else 570 answ = tp->write_seq - tp->snd_una; 571 break; 572 case SIOCOUTQNSD: 573 if (sk->sk_state == TCP_LISTEN) 574 return -EINVAL; 575 576 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 577 answ = 0; 578 else 579 answ = tp->write_seq - tp->snd_nxt; 580 break; 581 default: 582 return -ENOIOCTLCMD; 583 } 584 585 return put_user(answ, (int __user *)arg); 586} 587EXPORT_SYMBOL(tcp_ioctl); 588 589static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 590{ 591 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 592 tp->pushed_seq = tp->write_seq; 593} 594 595static inline bool forced_push(const struct tcp_sock *tp) 596{ 597 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 598} 599 600static inline void skb_entail(struct sock *sk, struct sk_buff *skb) 601{ 602 struct tcp_sock *tp = tcp_sk(sk); 603 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 604 605 skb->csum = 0; 606 tcb->seq = tcb->end_seq = tp->write_seq; 607 tcb->tcp_flags = TCPHDR_ACK; 608 tcb->sacked = 0; 609 skb_header_release(skb); 610 tcp_add_write_queue_tail(sk, skb); 611 sk->sk_wmem_queued += skb->truesize; 612 sk_mem_charge(sk, skb->truesize); 613 if (tp->nonagle & TCP_NAGLE_PUSH) 614 tp->nonagle &= ~TCP_NAGLE_PUSH; 615} 616 617static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) 618{ 619 if (flags & MSG_OOB) 620 tp->snd_up = tp->write_seq; 621} 622 623static inline void tcp_push(struct sock *sk, int flags, int mss_now, 624 int nonagle) 625{ 626 if (tcp_send_head(sk)) { 627 struct tcp_sock *tp = tcp_sk(sk); 628 629 if (!(flags & MSG_MORE) || forced_push(tp)) 630 tcp_mark_push(tp, tcp_write_queue_tail(sk)); 631 632 tcp_mark_urg(tp, flags); 633 __tcp_push_pending_frames(sk, mss_now, 634 (flags & MSG_MORE) ? 
TCP_NAGLE_CORK : nonagle); 635 } 636} 637 638static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 639 unsigned int offset, size_t len) 640{ 641 struct tcp_splice_state *tss = rd_desc->arg.data; 642 int ret; 643 644 ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len), 645 tss->flags); 646 if (ret > 0) 647 rd_desc->count -= ret; 648 return ret; 649} 650 651static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 652{ 653 /* Store TCP splice context information in read_descriptor_t. */ 654 read_descriptor_t rd_desc = { 655 .arg.data = tss, 656 .count = tss->len, 657 }; 658 659 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 660} 661 662/** 663 * tcp_splice_read - splice data from TCP socket to a pipe 664 * @sock: socket to splice from 665 * @ppos: position (not valid) 666 * @pipe: pipe to splice to 667 * @len: number of bytes to splice 668 * @flags: splice modifier flags 669 * 670 * Description: 671 * Will read pages from given socket and fill them into a pipe. 672 * 673 **/ 674ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 675 struct pipe_inode_info *pipe, size_t len, 676 unsigned int flags) 677{ 678 struct sock *sk = sock->sk; 679 struct tcp_splice_state tss = { 680 .pipe = pipe, 681 .len = len, 682 .flags = flags, 683 }; 684 long timeo; 685 ssize_t spliced; 686 int ret; 687 688 sock_rps_record_flow(sk); 689 /* 690 * We can't seek on a socket input 691 */ 692 if (unlikely(*ppos)) 693 return -ESPIPE; 694 695 ret = spliced = 0; 696 697 lock_sock(sk); 698 699 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); 700 while (tss.len) { 701 ret = __tcp_splice_read(sk, &tss); 702 if (ret < 0) 703 break; 704 else if (!ret) { 705 if (spliced) 706 break; 707 if (sock_flag(sk, SOCK_DONE)) 708 break; 709 if (sk->sk_err) { 710 ret = sock_error(sk); 711 break; 712 } 713 if (sk->sk_shutdown & RCV_SHUTDOWN) 714 break; 715 if (sk->sk_state == TCP_CLOSE) { 716 /* 717 * This occurs when user tries to read 718 * from never connected socket. 719 */ 720 if (!sock_flag(sk, SOCK_DONE)) 721 ret = -ENOTCONN; 722 break; 723 } 724 if (!timeo) { 725 ret = -EAGAIN; 726 break; 727 } 728 sk_wait_data(sk, &timeo); 729 if (signal_pending(current)) { 730 ret = sock_intr_errno(timeo); 731 break; 732 } 733 continue; 734 } 735 tss.len -= ret; 736 spliced += ret; 737 738 if (!timeo) 739 break; 740 release_sock(sk); 741 lock_sock(sk); 742 743 if (sk->sk_err || sk->sk_state == TCP_CLOSE || 744 (sk->sk_shutdown & RCV_SHUTDOWN) || 745 signal_pending(current)) 746 break; 747 } 748 749 release_sock(sk); 750 751 if (spliced) 752 return spliced; 753 754 return ret; 755} 756EXPORT_SYMBOL(tcp_splice_read); 757 758struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) 759{ 760 struct sk_buff *skb; 761 762 /* The TCP header must be at least 32-bit aligned. */ 763 size = ALIGN(size, 4); 764 765 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 766 if (skb) { 767 if (sk_wmem_schedule(sk, skb->truesize)) { 768 skb_reserve(skb, sk->sk_prot->max_header); 769 /* 770 * Make sure that we have exactly size bytes 771 * available to the caller, no more, no less. 
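 *
 * Worked example (illustrative numbers only): if the caller asked for
 * size = 1000 and, after the header reserve, the allocated data area
 * leaves skb->end - skb->tail = 1400, then
 *
 *	skb->reserved_tailroom = 1400 - 1000 = 400
 *
 * and skb_availroom() reports 1400 - 400 = 1000, i.e. exactly the
 * requested bytes.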
772 */ 773 skb->reserved_tailroom = skb->end - skb->tail - size; 774 return skb; 775 } 776 __kfree_skb(skb); 777 } else { 778 sk->sk_prot->enter_memory_pressure(sk); 779 sk_stream_moderate_sndbuf(sk); 780 } 781 return NULL; 782} 783 784static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, 785 int large_allowed) 786{ 787 struct tcp_sock *tp = tcp_sk(sk); 788 u32 xmit_size_goal, old_size_goal; 789 790 xmit_size_goal = mss_now; 791 792 if (large_allowed && sk_can_gso(sk)) { 793 xmit_size_goal = ((sk->sk_gso_max_size - 1) - 794 inet_csk(sk)->icsk_af_ops->net_header_len - 795 inet_csk(sk)->icsk_ext_hdr_len - 796 tp->tcp_header_len); 797 798 /* TSQ : try to have two TSO segments in flight */ 799 xmit_size_goal = min_t(u32, xmit_size_goal, 800 sysctl_tcp_limit_output_bytes >> 1); 801 802 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); 803 804 /* We try hard to avoid divides here */ 805 old_size_goal = tp->xmit_size_goal_segs * mss_now; 806 807 if (likely(old_size_goal <= xmit_size_goal && 808 old_size_goal + mss_now > xmit_size_goal)) { 809 xmit_size_goal = old_size_goal; 810 } else { 811 tp->xmit_size_goal_segs = 812 min_t(u16, xmit_size_goal / mss_now, 813 sk->sk_gso_max_segs); 814 xmit_size_goal = tp->xmit_size_goal_segs * mss_now; 815 } 816 } 817 818 return max(xmit_size_goal, mss_now); 819} 820 821static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) 822{ 823 int mss_now; 824 825 mss_now = tcp_current_mss(sk); 826 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); 827 828 return mss_now; 829} 830 831static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 832 size_t size, int flags) 833{ 834 struct tcp_sock *tp = tcp_sk(sk); 835 int mss_now, size_goal; 836 int err; 837 ssize_t copied; 838 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 839 840 /* Wait for a connection to finish. One exception is TCP Fast Open 841 * (passive side) where data is allowed to be sent before a connection 842 * is fully established. 
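 *
 * Illustrative user-space sketch (assumed example, not from this
 * file): the passive side opts in with the TCP_FASTOPEN listener
 * option and an active client can carry data on its SYN with
 * MSG_FASTOPEN:
 *
 *	int qlen = 5;
 *	setsockopt(listen_fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	...
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * which is why a socket here may legitimately have data to send
 * before reaching ESTABLISHED.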
843 */ 844 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 845 !tcp_passive_fastopen(sk)) { 846 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 847 goto out_err; 848 } 849 850 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 851 852 mss_now = tcp_send_mss(sk, &size_goal, flags); 853 copied = 0; 854 855 err = -EPIPE; 856 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 857 goto out_err; 858 859 while (size > 0) { 860 struct sk_buff *skb = tcp_write_queue_tail(sk); 861 int copy, i; 862 bool can_coalesce; 863 864 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 865new_segment: 866 if (!sk_stream_memory_free(sk)) 867 goto wait_for_sndbuf; 868 869 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); 870 if (!skb) 871 goto wait_for_memory; 872 873 skb_entail(sk, skb); 874 copy = size_goal; 875 } 876 877 if (copy > size) 878 copy = size; 879 880 i = skb_shinfo(skb)->nr_frags; 881 can_coalesce = skb_can_coalesce(skb, i, page, offset); 882 if (!can_coalesce && i >= MAX_SKB_FRAGS) { 883 tcp_mark_push(tp, skb); 884 goto new_segment; 885 } 886 if (!sk_wmem_schedule(sk, copy)) 887 goto wait_for_memory; 888 889 if (can_coalesce) { 890 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 891 } else { 892 get_page(page); 893 skb_fill_page_desc(skb, i, page, offset, copy); 894 } 895 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 896 897 skb->len += copy; 898 skb->data_len += copy; 899 skb->truesize += copy; 900 sk->sk_wmem_queued += copy; 901 sk_mem_charge(sk, copy); 902 skb->ip_summed = CHECKSUM_PARTIAL; 903 tp->write_seq += copy; 904 TCP_SKB_CB(skb)->end_seq += copy; 905 skb_shinfo(skb)->gso_segs = 0; 906 907 if (!copied) 908 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 909 910 copied += copy; 911 offset += copy; 912 if (!(size -= copy)) 913 goto out; 914 915 if (skb->len < size_goal || (flags & MSG_OOB)) 916 continue; 917 918 if (forced_push(tp)) { 919 tcp_mark_push(tp, skb); 920 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 921 } else if (skb == tcp_send_head(sk)) 922 tcp_push_one(sk, mss_now); 923 continue; 924 925wait_for_sndbuf: 926 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 927wait_for_memory: 928 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 929 930 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 931 goto do_error; 932 933 mss_now = tcp_send_mss(sk, &size_goal, flags); 934 } 935 936out: 937 if (copied && !(flags & MSG_SENDPAGE_NOTLAST)) 938 tcp_push(sk, flags, mss_now, tp->nonagle); 939 return copied; 940 941do_error: 942 if (copied) 943 goto out; 944out_err: 945 return sk_stream_error(sk, flags, err); 946} 947 948int tcp_sendpage(struct sock *sk, struct page *page, int offset, 949 size_t size, int flags) 950{ 951 ssize_t res; 952 953 if (!(sk->sk_route_caps & NETIF_F_SG) || 954 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 955 return sock_no_sendpage(sk->sk_socket, page, offset, size, 956 flags); 957 958 lock_sock(sk); 959 res = do_tcp_sendpages(sk, page, offset, size, flags); 960 release_sock(sk); 961 return res; 962} 963EXPORT_SYMBOL(tcp_sendpage); 964 965static inline int select_size(const struct sock *sk, bool sg) 966{ 967 const struct tcp_sock *tp = tcp_sk(sk); 968 int tmp = tp->mss_cache; 969 970 if (sg) { 971 if (sk_can_gso(sk)) { 972 /* Small frames wont use a full page: 973 * Payload will immediately follow tcp header. 
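 * (In other words: for a GSO-capable device the linear area is sized
 * so that MAX_TCP_HEADER plus payload plus struct skb_shared_info
 * still fits a 2048-byte allocation, keeping small writes entirely
 * in the skb head with no page fragment.)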
974 */ 975 tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER); 976 } else { 977 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); 978 979 if (tmp >= pgbreak && 980 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) 981 tmp = pgbreak; 982 } 983 } 984 985 return tmp; 986} 987 988void tcp_free_fastopen_req(struct tcp_sock *tp) 989{ 990 if (tp->fastopen_req != NULL) { 991 kfree(tp->fastopen_req); 992 tp->fastopen_req = NULL; 993 } 994} 995 996static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) 997{ 998 struct tcp_sock *tp = tcp_sk(sk); 999 int err, flags; 1000 1001 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1002 return -EOPNOTSUPP; 1003 if (tp->fastopen_req != NULL) 1004 return -EALREADY; /* Another Fast Open is in progress */ 1005 1006 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1007 sk->sk_allocation); 1008 if (unlikely(tp->fastopen_req == NULL)) 1009 return -ENOBUFS; 1010 tp->fastopen_req->data = msg; 1011 1012 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1013 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1014 msg->msg_namelen, flags); 1015 *size = tp->fastopen_req->copied; 1016 tcp_free_fastopen_req(tp); 1017 return err; 1018} 1019 1020int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1021 size_t size) 1022{ 1023 struct iovec *iov; 1024 struct tcp_sock *tp = tcp_sk(sk); 1025 struct sk_buff *skb; 1026 int iovlen, flags, err, copied = 0; 1027 int mss_now = 0, size_goal, copied_syn = 0, offset = 0; 1028 bool sg; 1029 long timeo; 1030 1031 lock_sock(sk); 1032 1033 flags = msg->msg_flags; 1034 if (flags & MSG_FASTOPEN) { 1035 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn); 1036 if (err == -EINPROGRESS && copied_syn > 0) 1037 goto out; 1038 else if (err) 1039 goto out_err; 1040 offset = copied_syn; 1041 } 1042 1043 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1044 1045 /* Wait for a connection to finish. One exception is TCP Fast Open 1046 * (passive side) where data is allowed to be sent before a connection 1047 * is fully established. 1048 */ 1049 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1050 !tcp_passive_fastopen(sk)) { 1051 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1052 goto do_error; 1053 } 1054 1055 if (unlikely(tp->repair)) { 1056 if (tp->repair_queue == TCP_RECV_QUEUE) { 1057 copied = tcp_send_rcvq(sk, msg, size); 1058 goto out; 1059 } 1060 1061 err = -EINVAL; 1062 if (tp->repair_queue == TCP_NO_QUEUE) 1063 goto out_err; 1064 1065 /* 'common' sending to sendq */ 1066 } 1067 1068 /* This should be in poll */ 1069 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1070 1071 mss_now = tcp_send_mss(sk, &size_goal, flags); 1072 1073 /* Ok commence sending. 
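 *
 * (Illustrative caller, not part of this file: the iovec walk below
 * services an ordinary scatter-gather send such as
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct msghdr m = { .msg_iov = iov, .msg_iovlen = 2 };
 *	sendmsg(fd, &m, 0);
 *
 * where fd, hdr and body are the application's socket and buffers.)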
*/ 1074 iovlen = msg->msg_iovlen; 1075 iov = msg->msg_iov; 1076 copied = 0; 1077 1078 err = -EPIPE; 1079 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1080 goto out_err; 1081 1082 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1083 1084 while (--iovlen >= 0) { 1085 size_t seglen = iov->iov_len; 1086 unsigned char __user *from = iov->iov_base; 1087 1088 iov++; 1089 if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */ 1090 if (offset >= seglen) { 1091 offset -= seglen; 1092 continue; 1093 } 1094 seglen -= offset; 1095 from += offset; 1096 offset = 0; 1097 } 1098 1099 while (seglen > 0) { 1100 int copy = 0; 1101 int max = size_goal; 1102 1103 skb = tcp_write_queue_tail(sk); 1104 if (tcp_send_head(sk)) { 1105 if (skb->ip_summed == CHECKSUM_NONE) 1106 max = mss_now; 1107 copy = max - skb->len; 1108 } 1109 1110 if (copy <= 0) { 1111new_segment: 1112 /* Allocate new segment. If the interface is SG, 1113 * allocate skb fitting to single page. 1114 */ 1115 if (!sk_stream_memory_free(sk)) 1116 goto wait_for_sndbuf; 1117 1118 skb = sk_stream_alloc_skb(sk, 1119 select_size(sk, sg), 1120 sk->sk_allocation); 1121 if (!skb) 1122 goto wait_for_memory; 1123 1124 /* 1125 * Check whether we can use HW checksum. 1126 */ 1127 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1128 skb->ip_summed = CHECKSUM_PARTIAL; 1129 1130 skb_entail(sk, skb); 1131 copy = size_goal; 1132 max = size_goal; 1133 } 1134 1135 /* Try to append data to the end of skb. */ 1136 if (copy > seglen) 1137 copy = seglen; 1138 1139 /* Where to copy to? */ 1140 if (skb_availroom(skb) > 0) { 1141 /* We have some space in skb head. Superb! */ 1142 copy = min_t(int, copy, skb_availroom(skb)); 1143 err = skb_add_data_nocache(sk, skb, from, copy); 1144 if (err) 1145 goto do_fault; 1146 } else { 1147 bool merge = true; 1148 int i = skb_shinfo(skb)->nr_frags; 1149 struct page_frag *pfrag = sk_page_frag(sk); 1150 1151 if (!sk_page_frag_refill(sk, pfrag)) 1152 goto wait_for_memory; 1153 1154 if (!skb_can_coalesce(skb, i, pfrag->page, 1155 pfrag->offset)) { 1156 if (i == MAX_SKB_FRAGS || !sg) { 1157 tcp_mark_push(tp, skb); 1158 goto new_segment; 1159 } 1160 merge = false; 1161 } 1162 1163 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1164 1165 if (!sk_wmem_schedule(sk, copy)) 1166 goto wait_for_memory; 1167 1168 err = skb_copy_to_page_nocache(sk, from, skb, 1169 pfrag->page, 1170 pfrag->offset, 1171 copy); 1172 if (err) 1173 goto do_error; 1174 1175 /* Update the skb. 
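 * If the bytes just copied landed directly after the last fragment
 * (same page_frag page, contiguous offset) then "merge" is true and
 * we only grow that fragment's size; otherwise a new fragment slot
 * is filled and an extra reference on pfrag->page is taken, because
 * the skb now holds its own pointer to that page.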
*/ 1176 if (merge) { 1177 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1178 } else { 1179 skb_fill_page_desc(skb, i, pfrag->page, 1180 pfrag->offset, copy); 1181 get_page(pfrag->page); 1182 } 1183 pfrag->offset += copy; 1184 } 1185 1186 if (!copied) 1187 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1188 1189 tp->write_seq += copy; 1190 TCP_SKB_CB(skb)->end_seq += copy; 1191 skb_shinfo(skb)->gso_segs = 0; 1192 1193 from += copy; 1194 copied += copy; 1195 if ((seglen -= copy) == 0 && iovlen == 0) 1196 goto out; 1197 1198 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair)) 1199 continue; 1200 1201 if (forced_push(tp)) { 1202 tcp_mark_push(tp, skb); 1203 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1204 } else if (skb == tcp_send_head(sk)) 1205 tcp_push_one(sk, mss_now); 1206 continue; 1207 1208wait_for_sndbuf: 1209 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1210wait_for_memory: 1211 if (copied) 1212 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 1213 1214 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1215 goto do_error; 1216 1217 mss_now = tcp_send_mss(sk, &size_goal, flags); 1218 } 1219 } 1220 1221out: 1222 if (copied) 1223 tcp_push(sk, flags, mss_now, tp->nonagle); 1224 release_sock(sk); 1225 1226 if (copied + copied_syn) 1227 uid_stat_tcp_snd(current_uid(), copied + copied_syn); 1228 return copied + copied_syn; 1229 1230do_fault: 1231 if (!skb->len) { 1232 tcp_unlink_write_queue(skb, sk); 1233 /* It is the one place in all of TCP, except connection 1234 * reset, where we can be unlinking the send_head. 1235 */ 1236 tcp_check_send_head(sk, skb); 1237 sk_wmem_free_skb(sk, skb); 1238 } 1239 1240do_error: 1241 if (copied + copied_syn) 1242 goto out; 1243out_err: 1244 err = sk_stream_error(sk, flags, err); 1245 release_sock(sk); 1246 return err; 1247} 1248EXPORT_SYMBOL(tcp_sendmsg); 1249 1250/* 1251 * Handle reading urgent data. BSD has very simple semantics for 1252 * this, no blocking and very strange errors 8) 1253 */ 1254 1255static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1256{ 1257 struct tcp_sock *tp = tcp_sk(sk); 1258 1259 /* No URG data to read. */ 1260 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1261 tp->urg_data == TCP_URG_READ) 1262 return -EINVAL; /* Yes this is right ! */ 1263 1264 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1265 return -ENOTCONN; 1266 1267 if (tp->urg_data & TCP_URG_VALID) { 1268 int err = 0; 1269 char c = tp->urg_data; 1270 1271 if (!(flags & MSG_PEEK)) 1272 tp->urg_data = TCP_URG_READ; 1273 1274 /* Read urgent data. */ 1275 msg->msg_flags |= MSG_OOB; 1276 1277 if (len > 0) { 1278 if (!(flags & MSG_TRUNC)) 1279 err = memcpy_toiovec(msg->msg_iov, &c, 1); 1280 len = 1; 1281 } else 1282 msg->msg_flags |= MSG_TRUNC; 1283 1284 return err ? -EFAULT : len; 1285 } 1286 1287 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1288 return 0; 1289 1290 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1291 * the available implementations agree in this case: 1292 * this call should never block, independent of the 1293 * blocking state of the socket. 
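 *
 * Illustrative caller (assumed example, not from this file):
 *
 *	char oob;
 *	if (recv(fd, &oob, 1, MSG_OOB) < 0)
 *		;	/* EINVAL or EAGAIN, never a blocking wait */
 *
 * i.e. when no urgent byte can be returned the call fails at once,
 * even on a blocking socket, as the error paths above and the
 * -EAGAIN fall-through below implement.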
1294 * Mike <pall@rz.uni-karlsruhe.de> 1295 */ 1296 return -EAGAIN; 1297} 1298 1299static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1300{ 1301 struct sk_buff *skb; 1302 int copied = 0, err = 0; 1303 1304 /* XXX -- need to support SO_PEEK_OFF */ 1305 1306 skb_queue_walk(&sk->sk_write_queue, skb) { 1307 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); 1308 if (err) 1309 break; 1310 1311 copied += skb->len; 1312 } 1313 1314 return err ?: copied; 1315} 1316 1317/* Clean up the receive buffer for full frames taken by the user, 1318 * then send an ACK if necessary. COPIED is the number of bytes 1319 * tcp_recvmsg has given to the user so far, it speeds up the 1320 * calculation of whether or not we must ACK for the sake of 1321 * a window update. 1322 */ 1323void tcp_cleanup_rbuf(struct sock *sk, int copied) 1324{ 1325 struct tcp_sock *tp = tcp_sk(sk); 1326 bool time_to_ack = false; 1327 1328 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1329 1330 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1331 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1332 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1333 1334 if (inet_csk_ack_scheduled(sk)) { 1335 const struct inet_connection_sock *icsk = inet_csk(sk); 1336 /* Delayed ACKs frequently hit locked sockets during bulk 1337 * receive. */ 1338 if (icsk->icsk_ack.blocked || 1339 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 1340 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1341 /* 1342 * If this read emptied read buffer, we send ACK, if 1343 * connection is not bidirectional, user drained 1344 * receive buffer and there was a small segment 1345 * in queue. 1346 */ 1347 (copied > 0 && 1348 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1349 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1350 !icsk->icsk_ack.pingpong)) && 1351 !atomic_read(&sk->sk_rmem_alloc))) 1352 time_to_ack = true; 1353 } 1354 1355 /* We send an ACK if we can now advertise a non-zero window 1356 * which has been raised "significantly". 1357 * 1358 * Even if window raised up to infinity, do not send window open ACK 1359 * in states, where we will not receive more. It is useless. 1360 */ 1361 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1362 __u32 rcv_window_now = tcp_receive_window(tp); 1363 1364 /* Optimize, __tcp_select_window() is not cheap. */ 1365 if (2*rcv_window_now <= tp->window_clamp) { 1366 __u32 new_window = __tcp_select_window(sk); 1367 1368 /* Send ACK now, if this read freed lots of space 1369 * in our buffer. Certainly, new_window is new window. 1370 * We can advertise it now, if it is not less than current one. 1371 * "Lots" means "at least twice" here. 1372 */ 1373 if (new_window && new_window >= 2 * rcv_window_now) 1374 time_to_ack = true; 1375 } 1376 } 1377 if (time_to_ack) 1378 tcp_send_ack(sk); 1379} 1380 1381static void tcp_prequeue_process(struct sock *sk) 1382{ 1383 struct sk_buff *skb; 1384 struct tcp_sock *tp = tcp_sk(sk); 1385 1386 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); 1387 1388 /* RX process wants to run with disabled BHs, though it is not 1389 * necessary */ 1390 local_bh_disable(); 1391 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 1392 sk_backlog_rcv(sk, skb); 1393 local_bh_enable(); 1394 1395 /* Clear memory counter. 
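 * (ucopy.memory accumulates the truesize of skbs parked on the
 * prequeue when tcp_prequeue() charges them; once the prequeue has
 * been drained through sk_backlog_rcv() above, that charge is
 * dropped again.)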
*/ 1396 tp->ucopy.memory = 0; 1397} 1398 1399#ifdef CONFIG_NET_DMA 1400static void tcp_service_net_dma(struct sock *sk, bool wait) 1401{ 1402 dma_cookie_t done, used; 1403 dma_cookie_t last_issued; 1404 struct tcp_sock *tp = tcp_sk(sk); 1405 1406 if (!tp->ucopy.dma_chan) 1407 return; 1408 1409 last_issued = tp->ucopy.dma_cookie; 1410 dma_async_issue_pending(tp->ucopy.dma_chan); 1411 1412 do { 1413 if (dma_async_is_tx_complete(tp->ucopy.dma_chan, 1414 last_issued, &done, 1415 &used) == DMA_SUCCESS) { 1416 /* Safe to free early-copied skbs now */ 1417 __skb_queue_purge(&sk->sk_async_wait_queue); 1418 break; 1419 } else { 1420 struct sk_buff *skb; 1421 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1422 (dma_async_is_complete(skb->dma_cookie, done, 1423 used) == DMA_SUCCESS)) { 1424 __skb_dequeue(&sk->sk_async_wait_queue); 1425 kfree_skb(skb); 1426 } 1427 } 1428 } while (wait); 1429} 1430#endif 1431 1432static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1433{ 1434 struct sk_buff *skb; 1435 u32 offset; 1436 1437 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1438 offset = seq - TCP_SKB_CB(skb)->seq; 1439 if (tcp_hdr(skb)->syn) 1440 offset--; 1441 if (offset < skb->len || tcp_hdr(skb)->fin) { 1442 *off = offset; 1443 return skb; 1444 } 1445 /* This looks weird, but this can happen if TCP collapsing 1446 * splitted a fat GRO packet, while we released socket lock 1447 * in skb_splice_bits() 1448 */ 1449 sk_eat_skb(sk, skb, false); 1450 } 1451 return NULL; 1452} 1453 1454/* 1455 * This routine provides an alternative to tcp_recvmsg() for routines 1456 * that would like to handle copying from skbuffs directly in 'sendfile' 1457 * fashion. 1458 * Note: 1459 * - It is assumed that the socket was locked by the caller. 1460 * - The routine does not block. 1461 * - At present, there is no support for reading OOB data 1462 * or for 'peeking' the socket using this routine 1463 * (although both would be easy to implement). 1464 */ 1465int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1466 sk_read_actor_t recv_actor) 1467{ 1468 struct sk_buff *skb; 1469 struct tcp_sock *tp = tcp_sk(sk); 1470 u32 seq = tp->copied_seq; 1471 u32 offset; 1472 int copied = 0; 1473 1474 if (sk->sk_state == TCP_LISTEN) 1475 return -ENOTCONN; 1476 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1477 if (offset < skb->len) { 1478 int used; 1479 size_t len; 1480 1481 len = skb->len - offset; 1482 /* Stop reading if we hit a patch of urgent data */ 1483 if (tp->urg_data) { 1484 u32 urg_offset = tp->urg_seq - seq; 1485 if (urg_offset < len) 1486 len = urg_offset; 1487 if (!len) 1488 break; 1489 } 1490 used = recv_actor(desc, skb, offset, len); 1491 if (used <= 0) { 1492 if (!copied) 1493 copied = used; 1494 break; 1495 } else if (used <= len) { 1496 seq += used; 1497 copied += used; 1498 offset += used; 1499 } 1500 /* If recv_actor drops the lock (e.g. TCP splice 1501 * receive) the skb pointer might be invalid when 1502 * getting here: tcp_collapse might have deleted it 1503 * while aggregating skbs from the socket queue. 1504 */ 1505 skb = tcp_recv_skb(sk, seq - 1, &offset); 1506 if (!skb) 1507 break; 1508 /* TCP coalescing might have appended data to the skb. 
1509 * Try to splice more frags 1510 */ 1511 if (offset + 1 != skb->len) 1512 continue; 1513 } 1514 if (tcp_hdr(skb)->fin) { 1515 sk_eat_skb(sk, skb, false); 1516 ++seq; 1517 break; 1518 } 1519 sk_eat_skb(sk, skb, false); 1520 if (!desc->count) 1521 break; 1522 tp->copied_seq = seq; 1523 } 1524 tp->copied_seq = seq; 1525 1526 tcp_rcv_space_adjust(sk); 1527 1528 /* Clean up data we have read: This will do ACK frames. */ 1529 if (copied > 0) { 1530 tcp_recv_skb(sk, seq, &offset); 1531 tcp_cleanup_rbuf(sk, copied); 1532 uid_stat_tcp_rcv(current_uid(), copied); 1533 } 1534 return copied; 1535} 1536EXPORT_SYMBOL(tcp_read_sock); 1537 1538/* 1539 * This routine copies from a sock struct into the user buffer. 1540 * 1541 * Technical note: in 2.3 we work on _locked_ socket, so that 1542 * tricks with *seq access order and skb->users are not required. 1543 * Probably, code can be easily improved even more. 1544 */ 1545 1546int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1547 size_t len, int nonblock, int flags, int *addr_len) 1548{ 1549 struct tcp_sock *tp = tcp_sk(sk); 1550 int copied = 0; 1551 u32 peek_seq; 1552 u32 *seq; 1553 unsigned long used; 1554 int err; 1555 int target; /* Read at least this many bytes */ 1556 long timeo; 1557 struct task_struct *user_recv = NULL; 1558 bool copied_early = false; 1559 struct sk_buff *skb; 1560 u32 urg_hole = 0; 1561 1562 lock_sock(sk); 1563 1564 err = -ENOTCONN; 1565 if (sk->sk_state == TCP_LISTEN) 1566 goto out; 1567 1568 timeo = sock_rcvtimeo(sk, nonblock); 1569 1570 /* Urgent data needs to be handled specially. */ 1571 if (flags & MSG_OOB) 1572 goto recv_urg; 1573 1574 if (unlikely(tp->repair)) { 1575 err = -EPERM; 1576 if (!(flags & MSG_PEEK)) 1577 goto out; 1578 1579 if (tp->repair_queue == TCP_SEND_QUEUE) 1580 goto recv_sndq; 1581 1582 err = -EINVAL; 1583 if (tp->repair_queue == TCP_NO_QUEUE) 1584 goto out; 1585 1586 /* 'common' recv queue MSG_PEEK-ing */ 1587 } 1588 1589 seq = &tp->copied_seq; 1590 if (flags & MSG_PEEK) { 1591 peek_seq = tp->copied_seq; 1592 seq = &peek_seq; 1593 } 1594 1595 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1596 1597#ifdef CONFIG_NET_DMA 1598 tp->ucopy.dma_chan = NULL; 1599 preempt_disable(); 1600 skb = skb_peek_tail(&sk->sk_receive_queue); 1601 { 1602 int available = 0; 1603 1604 if (skb) 1605 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); 1606 if ((available < target) && 1607 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1608 !sysctl_tcp_low_latency && 1609 net_dma_find_channel()) { 1610 preempt_enable_no_resched(); 1611 tp->ucopy.pinned_list = 1612 dma_pin_iovec_pages(msg->msg_iov, len); 1613 } else { 1614 preempt_enable_no_resched(); 1615 } 1616 } 1617#endif 1618 1619 do { 1620 u32 offset; 1621 1622 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 1623 if (tp->urg_data && tp->urg_seq == *seq) { 1624 if (copied) 1625 break; 1626 if (signal_pending(current)) { 1627 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 1628 break; 1629 } 1630 } 1631 1632 /* Next get a buffer. */ 1633 1634 skb_queue_walk(&sk->sk_receive_queue, skb) { 1635 /* Now that we have two receive queues this 1636 * shouldn't happen. 
1637 */ 1638 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 1639 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", 1640 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 1641 flags)) 1642 break; 1643 1644 offset = *seq - TCP_SKB_CB(skb)->seq; 1645 if (tcp_hdr(skb)->syn) 1646 offset--; 1647 if (offset < skb->len) 1648 goto found_ok_skb; 1649 if (tcp_hdr(skb)->fin) 1650 goto found_fin_ok; 1651 WARN(!(flags & MSG_PEEK), 1652 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", 1653 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 1654 } 1655 1656 /* Well, if we have backlog, try to process it now yet. */ 1657 1658 if (copied >= target && !sk->sk_backlog.tail) 1659 break; 1660 1661 if (copied) { 1662 if (sk->sk_err || 1663 sk->sk_state == TCP_CLOSE || 1664 (sk->sk_shutdown & RCV_SHUTDOWN) || 1665 !timeo || 1666 signal_pending(current)) 1667 break; 1668 } else { 1669 if (sock_flag(sk, SOCK_DONE)) 1670 break; 1671 1672 if (sk->sk_err) { 1673 copied = sock_error(sk); 1674 break; 1675 } 1676 1677 if (sk->sk_shutdown & RCV_SHUTDOWN) 1678 break; 1679 1680 if (sk->sk_state == TCP_CLOSE) { 1681 if (!sock_flag(sk, SOCK_DONE)) { 1682 /* This occurs when user tries to read 1683 * from never connected socket. 1684 */ 1685 copied = -ENOTCONN; 1686 break; 1687 } 1688 break; 1689 } 1690 1691 if (!timeo) { 1692 copied = -EAGAIN; 1693 break; 1694 } 1695 1696 if (signal_pending(current)) { 1697 copied = sock_intr_errno(timeo); 1698 break; 1699 } 1700 } 1701 1702 tcp_cleanup_rbuf(sk, copied); 1703 1704 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { 1705 /* Install new reader */ 1706 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { 1707 user_recv = current; 1708 tp->ucopy.task = user_recv; 1709 tp->ucopy.iov = msg->msg_iov; 1710 } 1711 1712 tp->ucopy.len = len; 1713 1714 WARN_ON(tp->copied_seq != tp->rcv_nxt && 1715 !(flags & (MSG_PEEK | MSG_TRUNC))); 1716 1717 /* Ugly... If prequeue is not empty, we have to 1718 * process it before releasing socket, otherwise 1719 * order will be broken at second iteration. 1720 * More elegant solution is required!!! 1721 * 1722 * Look: we have the following (pseudo)queues: 1723 * 1724 * 1. packets in flight 1725 * 2. backlog 1726 * 3. prequeue 1727 * 4. receive_queue 1728 * 1729 * Each queue can be processed only if the next ones 1730 * are empty. At this point we have empty receive_queue. 1731 * But prequeue _can_ be not empty after 2nd iteration, 1732 * when we jumped to start of loop because backlog 1733 * processing added something to receive_queue. 1734 * We cannot release_sock(), because backlog contains 1735 * packets arrived _after_ prequeued ones. 1736 * 1737 * Shortly, algorithm is clear --- to process all 1738 * the queues in order. We could make it more directly, 1739 * requeueing packets from backlog to prequeue, if 1740 * is not empty. It is more elegant, but eats cycles, 1741 * unfortunately. 1742 */ 1743 if (!skb_queue_empty(&tp->ucopy.prequeue)) 1744 goto do_prequeue; 1745 1746 /* __ Set realtime policy in scheduler __ */ 1747 } 1748 1749#ifdef CONFIG_NET_DMA 1750 if (tp->ucopy.dma_chan) { 1751 if (tp->rcv_wnd == 0 && 1752 !skb_queue_empty(&sk->sk_async_wait_queue)) { 1753 tcp_service_net_dma(sk, true); 1754 tcp_cleanup_rbuf(sk, copied); 1755 } else 1756 dma_async_issue_pending(tp->ucopy.dma_chan); 1757 } 1758#endif 1759 if (copied >= target) { 1760 /* Do not sleep, just process backlog. 
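 * (release_sock() runs __release_sock(), which feeds every skb that
 * softirq context parked on sk->sk_backlog through sk_backlog_rcv();
 * re-taking the lock straight away lets the copy loop continue with
 * a refilled receive queue.)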
*/ 1761 release_sock(sk); 1762 lock_sock(sk); 1763 } else 1764 sk_wait_data(sk, &timeo); 1765 1766#ifdef CONFIG_NET_DMA 1767 tcp_service_net_dma(sk, false); /* Don't block */ 1768 tp->ucopy.wakeup = 0; 1769#endif 1770 1771 if (user_recv) { 1772 int chunk; 1773 1774 /* __ Restore normal policy in scheduler __ */ 1775 1776 if ((chunk = len - tp->ucopy.len) != 0) { 1777 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); 1778 len -= chunk; 1779 copied += chunk; 1780 } 1781 1782 if (tp->rcv_nxt == tp->copied_seq && 1783 !skb_queue_empty(&tp->ucopy.prequeue)) { 1784do_prequeue: 1785 tcp_prequeue_process(sk); 1786 1787 if ((chunk = len - tp->ucopy.len) != 0) { 1788 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1789 len -= chunk; 1790 copied += chunk; 1791 } 1792 } 1793 } 1794 if ((flags & MSG_PEEK) && 1795 (peek_seq - copied - urg_hole != tp->copied_seq)) { 1796 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 1797 current->comm, 1798 task_pid_nr(current)); 1799 peek_seq = tp->copied_seq; 1800 } 1801 continue; 1802 1803 found_ok_skb: 1804 /* Ok so how much can we use? */ 1805 used = skb->len - offset; 1806 if (len < used) 1807 used = len; 1808 1809 /* Do we have urgent data here? */ 1810 if (tp->urg_data) { 1811 u32 urg_offset = tp->urg_seq - *seq; 1812 if (urg_offset < used) { 1813 if (!urg_offset) { 1814 if (!sock_flag(sk, SOCK_URGINLINE)) { 1815 ++*seq; 1816 urg_hole++; 1817 offset++; 1818 used--; 1819 if (!used) 1820 goto skip_copy; 1821 } 1822 } else 1823 used = urg_offset; 1824 } 1825 } 1826 1827 if (!(flags & MSG_TRUNC)) { 1828#ifdef CONFIG_NET_DMA 1829 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1830 tp->ucopy.dma_chan = net_dma_find_channel(); 1831 1832 if (tp->ucopy.dma_chan) { 1833 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1834 tp->ucopy.dma_chan, skb, offset, 1835 msg->msg_iov, used, 1836 tp->ucopy.pinned_list); 1837 1838 if (tp->ucopy.dma_cookie < 0) { 1839 1840 pr_alert("%s: dma_cookie < 0\n", 1841 __func__); 1842 1843 /* Exception. Bailout! */ 1844 if (!copied) 1845 copied = -EFAULT; 1846 break; 1847 } 1848 1849 dma_async_issue_pending(tp->ucopy.dma_chan); 1850 1851 if ((offset + used) == skb->len) 1852 copied_early = true; 1853 1854 } else 1855#endif 1856 { 1857 err = skb_copy_datagram_iovec(skb, offset, 1858 msg->msg_iov, used); 1859 if (err) { 1860 /* Exception. Bailout! */ 1861 if (!copied) 1862 copied = -EFAULT; 1863 break; 1864 } 1865 } 1866 } 1867 1868 *seq += used; 1869 copied += used; 1870 len -= used; 1871 1872 tcp_rcv_space_adjust(sk); 1873 1874skip_copy: 1875 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { 1876 tp->urg_data = 0; 1877 tcp_fast_path_check(sk); 1878 } 1879 if (used + offset < skb->len) 1880 continue; 1881 1882 if (tcp_hdr(skb)->fin) 1883 goto found_fin_ok; 1884 if (!(flags & MSG_PEEK)) { 1885 sk_eat_skb(sk, skb, copied_early); 1886 copied_early = false; 1887 } 1888 continue; 1889 1890 found_fin_ok: 1891 /* Process the FIN. */ 1892 ++*seq; 1893 if (!(flags & MSG_PEEK)) { 1894 sk_eat_skb(sk, skb, copied_early); 1895 copied_early = false; 1896 } 1897 break; 1898 } while (len > 0); 1899 1900 if (user_recv) { 1901 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1902 int chunk; 1903 1904 tp->ucopy.len = copied > 0 ? 
len : 0; 1905 1906 tcp_prequeue_process(sk); 1907 1908 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { 1909 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1910 len -= chunk; 1911 copied += chunk; 1912 } 1913 } 1914 1915 tp->ucopy.task = NULL; 1916 tp->ucopy.len = 0; 1917 } 1918 1919#ifdef CONFIG_NET_DMA 1920 tcp_service_net_dma(sk, true); /* Wait for queue to drain */ 1921 tp->ucopy.dma_chan = NULL; 1922 1923 if (tp->ucopy.pinned_list) { 1924 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1925 tp->ucopy.pinned_list = NULL; 1926 } 1927#endif 1928 1929 /* According to UNIX98, msg_name/msg_namelen are ignored 1930 * on connected socket. I was just happy when found this 8) --ANK 1931 */ 1932 1933 /* Clean up data we have read: This will do ACK frames. */ 1934 tcp_cleanup_rbuf(sk, copied); 1935 1936 release_sock(sk); 1937 1938 if (copied > 0) 1939 uid_stat_tcp_rcv(current_uid(), copied); 1940 return copied; 1941 1942out: 1943 release_sock(sk); 1944 return err; 1945 1946recv_urg: 1947 err = tcp_recv_urg(sk, msg, len, flags); 1948 if (err > 0) 1949 uid_stat_tcp_rcv(current_uid(), err); 1950 goto out; 1951 1952recv_sndq: 1953 err = tcp_peek_sndq(sk, msg, len); 1954 goto out; 1955} 1956EXPORT_SYMBOL(tcp_recvmsg); 1957 1958void tcp_set_state(struct sock *sk, int state) 1959{ 1960 int oldstate = sk->sk_state; 1961 1962 switch (state) { 1963 case TCP_ESTABLISHED: 1964 if (oldstate != TCP_ESTABLISHED) 1965 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1966 break; 1967 1968 case TCP_CLOSE: 1969 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 1970 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 1971 1972 sk->sk_prot->unhash(sk); 1973 if (inet_csk(sk)->icsk_bind_hash && 1974 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1975 inet_put_port(sk); 1976 /* fall through */ 1977 default: 1978 if (oldstate == TCP_ESTABLISHED) 1979 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1980 } 1981 1982 /* Change state AFTER socket is unhashed to avoid closed 1983 * socket sitting in hash tables. 1984 */ 1985 sk->sk_state = state; 1986 1987#ifdef STATE_TRACE 1988 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 1989#endif 1990} 1991EXPORT_SYMBOL_GPL(tcp_set_state); 1992 1993/* 1994 * State processing on a close. This implements the state shift for 1995 * sending our FIN frame. Note that we only send a FIN for some 1996 * states. A shutdown() may have already sent the FIN, or we may be 1997 * closed. 1998 */ 1999 2000static const unsigned char new_state[16] = { 2001 /* current state: new state: action: */ 2002 /* (Invalid) */ TCP_CLOSE, 2003 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2004 /* TCP_SYN_SENT */ TCP_CLOSE, 2005 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2006 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, 2007 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, 2008 /* TCP_TIME_WAIT */ TCP_CLOSE, 2009 /* TCP_CLOSE */ TCP_CLOSE, 2010 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, 2011 /* TCP_LAST_ACK */ TCP_LAST_ACK, 2012 /* TCP_LISTEN */ TCP_CLOSE, 2013 /* TCP_CLOSING */ TCP_CLOSING, 2014}; 2015 2016static int tcp_close_state(struct sock *sk) 2017{ 2018 int next = (int)new_state[sk->sk_state]; 2019 int ns = next & TCP_STATE_MASK; 2020 2021 tcp_set_state(sk, ns); 2022 2023 return next & TCP_ACTION_FIN; 2024} 2025 2026/* 2027 * Shutdown the sending side of a connection. Much like close except 2028 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 
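 *
 * Typical user-space trigger (illustrative, not from this file):
 *
 *	shutdown(fd, SHUT_WR);	/* half-close: FIN our side, keep reading */
 *
 * which arrives here with SEND_SHUTDOWN set in "how" and, if the
 * state machine allows it, queues the FIN via tcp_send_fin() below.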
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);

bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when the FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists may blame.
		 *
		 * E.g. the "RFC state" is ESTABLISHED if the Linux state is
		 * FIN-WAIT-1 but the FIN has still not been sent.
		 *
		 * The visible deviations are that we sometimes enter the
		 * time-wait state when it is not really required (harmless),
		 * and do not send active resets when they are required by the
		 * specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when they look like
		 * CLOSING or LAST_ACK to Linux).
		 * Probably I missed some more small holes.
		 * 						--ANK
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but will
		 * probably need API support or TCP_CORK SYN-ACK until
		 * data is written and socket is closed.)
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/* It is the last release_sock() in its life. It will remove the backlog. */
	release_sock(sk);


	/* Now the socket is owned by the kernel and we acquire the BH lock
	 * to finish the close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left at this
	 *	end. We use a 3 minute timeout (about the same as BSD) then
	 *	kill our end. If they send after that then tough - BUT: long
	 *	enough that we won't repeat the old "4*rto = almost no time"
	 *	whoops-reset mistake.
	 *
	 *	Nope, it was not a mistake. It is really the desired
	 *	behaviour, e.g. on HTTP servers, where such sockets are
	 *	useless but consume significant resources. Let's do it with
	 *	the special linger2 option.			--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 * finishes.
		 */
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, socket is reprieved until protocol close.
*/ 2229 2230out: 2231 bh_unlock_sock(sk); 2232 local_bh_enable(); 2233 sock_put(sk); 2234} 2235EXPORT_SYMBOL(tcp_close); 2236 2237/* These states need RST on ABORT according to RFC793 */ 2238 2239static inline bool tcp_need_reset(int state) 2240{ 2241 return (1 << state) & 2242 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2243 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 2244} 2245 2246int tcp_disconnect(struct sock *sk, int flags) 2247{ 2248 struct inet_sock *inet = inet_sk(sk); 2249 struct inet_connection_sock *icsk = inet_csk(sk); 2250 struct tcp_sock *tp = tcp_sk(sk); 2251 int err = 0; 2252 int old_state = sk->sk_state; 2253 2254 if (old_state != TCP_CLOSE) 2255 tcp_set_state(sk, TCP_CLOSE); 2256 2257 /* ABORT function of RFC793 */ 2258 if (old_state == TCP_LISTEN) { 2259 inet_csk_listen_stop(sk); 2260 } else if (unlikely(tp->repair)) { 2261 sk->sk_err = ECONNABORTED; 2262 } else if (tcp_need_reset(old_state) || 2263 (tp->snd_nxt != tp->write_seq && 2264 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2265 /* The last check adjusts for discrepancy of Linux wrt. RFC 2266 * states 2267 */ 2268 tcp_send_active_reset(sk, gfp_any()); 2269 sk->sk_err = ECONNRESET; 2270 } else if (old_state == TCP_SYN_SENT) 2271 sk->sk_err = ECONNRESET; 2272 2273 tcp_clear_xmit_timers(sk); 2274 __skb_queue_purge(&sk->sk_receive_queue); 2275 tcp_write_queue_purge(sk); 2276 __skb_queue_purge(&tp->out_of_order_queue); 2277#ifdef CONFIG_NET_DMA 2278 __skb_queue_purge(&sk->sk_async_wait_queue); 2279#endif 2280 2281 inet->inet_dport = 0; 2282 2283 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 2284 inet_reset_saddr(sk); 2285 2286 sk->sk_shutdown = 0; 2287 sock_reset_flag(sk, SOCK_DONE); 2288 tp->srtt = 0; 2289 if ((tp->write_seq += tp->max_window + 2) == 0) 2290 tp->write_seq = 1; 2291 icsk->icsk_backoff = 0; 2292 tp->snd_cwnd = 2; 2293 icsk->icsk_probes_out = 0; 2294 tp->packets_out = 0; 2295 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2296 tp->snd_cwnd_cnt = 0; 2297 tp->window_clamp = 0; 2298 tcp_set_ca_state(sk, TCP_CA_Open); 2299 tcp_clear_retrans(tp); 2300 inet_csk_delack_init(sk); 2301 tcp_init_send_head(sk); 2302 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2303 __sk_dst_reset(sk); 2304 2305 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2306 2307 sk->sk_error_report(sk); 2308 return err; 2309} 2310EXPORT_SYMBOL(tcp_disconnect); 2311 2312void tcp_sock_destruct(struct sock *sk) 2313{ 2314 inet_sock_destruct(sk); 2315 2316 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); 2317} 2318 2319static inline bool tcp_can_repair_sock(const struct sock *sk) 2320{ 2321 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 2322 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); 2323} 2324 2325static int tcp_repair_options_est(struct tcp_sock *tp, 2326 struct tcp_repair_opt __user *optbuf, unsigned int len) 2327{ 2328 struct tcp_repair_opt opt; 2329 2330 while (len >= sizeof(opt)) { 2331 if (copy_from_user(&opt, optbuf, sizeof(opt))) 2332 return -EFAULT; 2333 2334 optbuf++; 2335 len -= sizeof(opt); 2336 2337 switch (opt.opt_code) { 2338 case TCPOPT_MSS: 2339 tp->rx_opt.mss_clamp = opt.opt_val; 2340 break; 2341 case TCPOPT_WINDOW: 2342 { 2343 u16 snd_wscale = opt.opt_val & 0xFFFF; 2344 u16 rcv_wscale = opt.opt_val >> 16; 2345 2346 if (snd_wscale > 14 || rcv_wscale > 14) 2347 return -EFBIG; 2348 2349 tp->rx_opt.snd_wscale = snd_wscale; 2350 tp->rx_opt.rcv_wscale = rcv_wscale; 2351 tp->rx_opt.wscale_ok = 1; 2352 } 2353 break; 2354 case TCPOPT_SACK_PERM: 2355 if (opt.opt_val != 0) 2356 return -EINVAL; 2357 2358 
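			/* SACK was negotiated on the connection being
			 * repaired; record that we have seen it and, if the
			 * sysctl asks for it, re-enable FACK as well.
			 */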
			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
			if (sysctl_tcp_fack)
				tcp_enable_fack(tp);
			break;
		case TCPOPT_TIMESTAMP:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.tstamp_ok = 1;
			break;
		}
	}

	return 0;
}

/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* These are data/string values, all the others are ints */
	switch (optname) {
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}
	default:
		/* fallthru */
		break;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is done we typically
		 * don't yet know which interface is going to be used.
		 */
		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on a corked socket is remembered, but
			 * it is not activated until the cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
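			 *
			 * For example, a latency-sensitive client typically
			 * enables it once, right after connect():
			 *
			 *	int one = 1;
			 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
			 *		   &one, sizeof(one));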
2438 */ 2439 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 2440 tcp_push_pending_frames(sk); 2441 } else { 2442 tp->nonagle &= ~TCP_NAGLE_OFF; 2443 } 2444 break; 2445 2446 case TCP_THIN_LINEAR_TIMEOUTS: 2447 if (val < 0 || val > 1) 2448 err = -EINVAL; 2449 else 2450 tp->thin_lto = val; 2451 break; 2452 2453 case TCP_THIN_DUPACK: 2454 if (val < 0 || val > 1) 2455 err = -EINVAL; 2456 else 2457 tp->thin_dupack = val; 2458 if (tp->thin_dupack) 2459 tcp_disable_early_retrans(tp); 2460 break; 2461 2462 case TCP_REPAIR: 2463 if (!tcp_can_repair_sock(sk)) 2464 err = -EPERM; 2465 else if (val == 1) { 2466 tp->repair = 1; 2467 sk->sk_reuse = SK_FORCE_REUSE; 2468 tp->repair_queue = TCP_NO_QUEUE; 2469 } else if (val == 0) { 2470 tp->repair = 0; 2471 sk->sk_reuse = SK_NO_REUSE; 2472 tcp_send_window_probe(sk); 2473 } else 2474 err = -EINVAL; 2475 2476 break; 2477 2478 case TCP_REPAIR_QUEUE: 2479 if (!tp->repair) 2480 err = -EPERM; 2481 else if (val < TCP_QUEUES_NR) 2482 tp->repair_queue = val; 2483 else 2484 err = -EINVAL; 2485 break; 2486 2487 case TCP_QUEUE_SEQ: 2488 if (sk->sk_state != TCP_CLOSE) 2489 err = -EPERM; 2490 else if (tp->repair_queue == TCP_SEND_QUEUE) 2491 tp->write_seq = val; 2492 else if (tp->repair_queue == TCP_RECV_QUEUE) 2493 tp->rcv_nxt = val; 2494 else 2495 err = -EINVAL; 2496 break; 2497 2498 case TCP_REPAIR_OPTIONS: 2499 if (!tp->repair) 2500 err = -EINVAL; 2501 else if (sk->sk_state == TCP_ESTABLISHED) 2502 err = tcp_repair_options_est(tp, 2503 (struct tcp_repair_opt __user *)optval, 2504 optlen); 2505 else 2506 err = -EPERM; 2507 break; 2508 2509 case TCP_CORK: 2510 /* When set indicates to always queue non-full frames. 2511 * Later the user clears this option and we transmit 2512 * any pending partial frames in the queue. This is 2513 * meant to be used alongside sendfile() to get properly 2514 * filled frames when the user (for example) must write 2515 * out headers with a write() call first and then use 2516 * sendfile to send out the data parts. 2517 * 2518 * TCP_CORK can be set together with TCP_NODELAY and it is 2519 * stronger than TCP_NODELAY. 
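		 *
		 * A typical sendfile() sequence from userspace looks like:
		 *
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &one, sizeof(one));
		 *	write(fd, headers, header_len);
		 *	sendfile(fd, file_fd, NULL, file_len);
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));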
2520 */ 2521 if (val) { 2522 tp->nonagle |= TCP_NAGLE_CORK; 2523 } else { 2524 tp->nonagle &= ~TCP_NAGLE_CORK; 2525 if (tp->nonagle&TCP_NAGLE_OFF) 2526 tp->nonagle |= TCP_NAGLE_PUSH; 2527 tcp_push_pending_frames(sk); 2528 } 2529 break; 2530 2531 case TCP_KEEPIDLE: 2532 if (val < 1 || val > MAX_TCP_KEEPIDLE) 2533 err = -EINVAL; 2534 else { 2535 tp->keepalive_time = val * HZ; 2536 if (sock_flag(sk, SOCK_KEEPOPEN) && 2537 !((1 << sk->sk_state) & 2538 (TCPF_CLOSE | TCPF_LISTEN))) { 2539 u32 elapsed = keepalive_time_elapsed(tp); 2540 if (tp->keepalive_time > elapsed) 2541 elapsed = tp->keepalive_time - elapsed; 2542 else 2543 elapsed = 0; 2544 inet_csk_reset_keepalive_timer(sk, elapsed); 2545 } 2546 } 2547 break; 2548 case TCP_KEEPINTVL: 2549 if (val < 1 || val > MAX_TCP_KEEPINTVL) 2550 err = -EINVAL; 2551 else 2552 tp->keepalive_intvl = val * HZ; 2553 break; 2554 case TCP_KEEPCNT: 2555 if (val < 1 || val > MAX_TCP_KEEPCNT) 2556 err = -EINVAL; 2557 else 2558 tp->keepalive_probes = val; 2559 break; 2560 case TCP_SYNCNT: 2561 if (val < 1 || val > MAX_TCP_SYNCNT) 2562 err = -EINVAL; 2563 else 2564 icsk->icsk_syn_retries = val; 2565 break; 2566 2567 case TCP_LINGER2: 2568 if (val < 0) 2569 tp->linger2 = -1; 2570 else if (val > sysctl_tcp_fin_timeout / HZ) 2571 tp->linger2 = 0; 2572 else 2573 tp->linger2 = val * HZ; 2574 break; 2575 2576 case TCP_DEFER_ACCEPT: 2577 /* Translate value in seconds to number of retransmits */ 2578 icsk->icsk_accept_queue.rskq_defer_accept = 2579 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 2580 TCP_RTO_MAX / HZ); 2581 break; 2582 2583 case TCP_WINDOW_CLAMP: 2584 if (!val) { 2585 if (sk->sk_state != TCP_CLOSE) { 2586 err = -EINVAL; 2587 break; 2588 } 2589 tp->window_clamp = 0; 2590 } else 2591 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 2592 SOCK_MIN_RCVBUF / 2 : val; 2593 break; 2594 2595 case TCP_QUICKACK: 2596 if (!val) { 2597 icsk->icsk_ack.pingpong = 1; 2598 } else { 2599 icsk->icsk_ack.pingpong = 0; 2600 if ((1 << sk->sk_state) & 2601 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 2602 inet_csk_ack_scheduled(sk)) { 2603 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 2604 tcp_cleanup_rbuf(sk, 1); 2605 if (!(val & 1)) 2606 icsk->icsk_ack.pingpong = 1; 2607 } 2608 } 2609 break; 2610 2611#ifdef CONFIG_TCP_MD5SIG 2612 case TCP_MD5SIG: 2613 /* Read the IP->Key mappings from userspace */ 2614 err = tp->af_specific->md5_parse(sk, optval, optlen); 2615 break; 2616#endif 2617 case TCP_USER_TIMEOUT: 2618 /* Cap the max timeout in ms TCP will retry/retrans 2619 * before giving up and aborting (ETIMEDOUT) a connection. 
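		 *
		 * For example, to give up after roughly 30 seconds:
		 *
		 *	unsigned int ms = 30 * 1000;
		 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		 *		   &ms, sizeof(ms));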
2620 */ 2621 if (val < 0) 2622 err = -EINVAL; 2623 else 2624 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2625 break; 2626 2627 case TCP_FASTOPEN: 2628 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 2629 TCPF_LISTEN))) 2630 err = fastopen_init_queue(sk, val); 2631 else 2632 err = -EINVAL; 2633 break; 2634 case TCP_TIMESTAMP: 2635 if (!tp->repair) 2636 err = -EPERM; 2637 else 2638 tp->tsoffset = val - tcp_time_stamp; 2639 break; 2640 default: 2641 err = -ENOPROTOOPT; 2642 break; 2643 } 2644 2645 release_sock(sk); 2646 return err; 2647} 2648 2649int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2650 unsigned int optlen) 2651{ 2652 const struct inet_connection_sock *icsk = inet_csk(sk); 2653 2654 if (level != SOL_TCP) 2655 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 2656 optval, optlen); 2657 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2658} 2659EXPORT_SYMBOL(tcp_setsockopt); 2660 2661#ifdef CONFIG_COMPAT 2662int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2663 char __user *optval, unsigned int optlen) 2664{ 2665 if (level != SOL_TCP) 2666 return inet_csk_compat_setsockopt(sk, level, optname, 2667 optval, optlen); 2668 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2669} 2670EXPORT_SYMBOL(compat_tcp_setsockopt); 2671#endif 2672 2673/* Return information about state of tcp endpoint in API format. */ 2674void tcp_get_info(const struct sock *sk, struct tcp_info *info) 2675{ 2676 const struct tcp_sock *tp = tcp_sk(sk); 2677 const struct inet_connection_sock *icsk = inet_csk(sk); 2678 u32 now = tcp_time_stamp; 2679 2680 memset(info, 0, sizeof(*info)); 2681 2682 info->tcpi_state = sk->sk_state; 2683 info->tcpi_ca_state = icsk->icsk_ca_state; 2684 info->tcpi_retransmits = icsk->icsk_retransmits; 2685 info->tcpi_probes = icsk->icsk_probes_out; 2686 info->tcpi_backoff = icsk->icsk_backoff; 2687 2688 if (tp->rx_opt.tstamp_ok) 2689 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 2690 if (tcp_is_sack(tp)) 2691 info->tcpi_options |= TCPI_OPT_SACK; 2692 if (tp->rx_opt.wscale_ok) { 2693 info->tcpi_options |= TCPI_OPT_WSCALE; 2694 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 2695 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2696 } 2697 2698 if (tp->ecn_flags & TCP_ECN_OK) 2699 info->tcpi_options |= TCPI_OPT_ECN; 2700 if (tp->ecn_flags & TCP_ECN_SEEN) 2701 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 2702 if (tp->syn_data_acked) 2703 info->tcpi_options |= TCPI_OPT_SYN_DATA; 2704 2705 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2706 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2707 info->tcpi_snd_mss = tp->mss_cache; 2708 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2709 2710 if (sk->sk_state == TCP_LISTEN) { 2711 info->tcpi_unacked = sk->sk_ack_backlog; 2712 info->tcpi_sacked = sk->sk_max_ack_backlog; 2713 } else { 2714 info->tcpi_unacked = tp->packets_out; 2715 info->tcpi_sacked = tp->sacked_out; 2716 } 2717 info->tcpi_lost = tp->lost_out; 2718 info->tcpi_retrans = tp->retrans_out; 2719 info->tcpi_fackets = tp->fackets_out; 2720 2721 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2722 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2723 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2724 2725 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 2726 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 2727 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; 2728 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; 2729 info->tcpi_snd_ssthresh = 
tp->snd_ssthresh; 2730 info->tcpi_snd_cwnd = tp->snd_cwnd; 2731 info->tcpi_advmss = tp->advmss; 2732 info->tcpi_reordering = tp->reordering; 2733 2734 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; 2735 info->tcpi_rcv_space = tp->rcvq_space.space; 2736 2737 info->tcpi_total_retrans = tp->total_retrans; 2738} 2739EXPORT_SYMBOL_GPL(tcp_get_info); 2740 2741static int do_tcp_getsockopt(struct sock *sk, int level, 2742 int optname, char __user *optval, int __user *optlen) 2743{ 2744 struct inet_connection_sock *icsk = inet_csk(sk); 2745 struct tcp_sock *tp = tcp_sk(sk); 2746 int val, len; 2747 2748 if (get_user(len, optlen)) 2749 return -EFAULT; 2750 2751 len = min_t(unsigned int, len, sizeof(int)); 2752 2753 if (len < 0) 2754 return -EINVAL; 2755 2756 switch (optname) { 2757 case TCP_MAXSEG: 2758 val = tp->mss_cache; 2759 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2760 val = tp->rx_opt.user_mss; 2761 if (tp->repair) 2762 val = tp->rx_opt.mss_clamp; 2763 break; 2764 case TCP_NODELAY: 2765 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2766 break; 2767 case TCP_CORK: 2768 val = !!(tp->nonagle&TCP_NAGLE_CORK); 2769 break; 2770 case TCP_KEEPIDLE: 2771 val = keepalive_time_when(tp) / HZ; 2772 break; 2773 case TCP_KEEPINTVL: 2774 val = keepalive_intvl_when(tp) / HZ; 2775 break; 2776 case TCP_KEEPCNT: 2777 val = keepalive_probes(tp); 2778 break; 2779 case TCP_SYNCNT: 2780 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 2781 break; 2782 case TCP_LINGER2: 2783 val = tp->linger2; 2784 if (val >= 0) 2785 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2786 break; 2787 case TCP_DEFER_ACCEPT: 2788 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 2789 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 2790 break; 2791 case TCP_WINDOW_CLAMP: 2792 val = tp->window_clamp; 2793 break; 2794 case TCP_INFO: { 2795 struct tcp_info info; 2796 2797 if (get_user(len, optlen)) 2798 return -EFAULT; 2799 2800 tcp_get_info(sk, &info); 2801 2802 len = min_t(unsigned int, len, sizeof(info)); 2803 if (put_user(len, optlen)) 2804 return -EFAULT; 2805 if (copy_to_user(optval, &info, len)) 2806 return -EFAULT; 2807 return 0; 2808 } 2809 case TCP_QUICKACK: 2810 val = !icsk->icsk_ack.pingpong; 2811 break; 2812 2813 case TCP_CONGESTION: 2814 if (get_user(len, optlen)) 2815 return -EFAULT; 2816 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 2817 if (put_user(len, optlen)) 2818 return -EFAULT; 2819 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 2820 return -EFAULT; 2821 return 0; 2822 2823 case TCP_THIN_LINEAR_TIMEOUTS: 2824 val = tp->thin_lto; 2825 break; 2826 case TCP_THIN_DUPACK: 2827 val = tp->thin_dupack; 2828 break; 2829 2830 case TCP_REPAIR: 2831 val = tp->repair; 2832 break; 2833 2834 case TCP_REPAIR_QUEUE: 2835 if (tp->repair) 2836 val = tp->repair_queue; 2837 else 2838 return -EINVAL; 2839 break; 2840 2841 case TCP_QUEUE_SEQ: 2842 if (tp->repair_queue == TCP_SEND_QUEUE) 2843 val = tp->write_seq; 2844 else if (tp->repair_queue == TCP_RECV_QUEUE) 2845 val = tp->rcv_nxt; 2846 else 2847 return -EINVAL; 2848 break; 2849 2850 case TCP_USER_TIMEOUT: 2851 val = jiffies_to_msecs(icsk->icsk_user_timeout); 2852 break; 2853 case TCP_TIMESTAMP: 2854 val = tcp_time_stamp + tp->tsoffset; 2855 break; 2856 default: 2857 return -ENOPROTOOPT; 2858 } 2859 2860 if (put_user(len, optlen)) 2861 return -EFAULT; 2862 if (copy_to_user(optval, &val, len)) 2863 return -EFAULT; 2864 return 0; 2865} 2866 2867int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 2868 int 
__user *optlen) 2869{ 2870 struct inet_connection_sock *icsk = inet_csk(sk); 2871 2872 if (level != SOL_TCP) 2873 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 2874 optval, optlen); 2875 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2876} 2877EXPORT_SYMBOL(tcp_getsockopt); 2878 2879#ifdef CONFIG_COMPAT 2880int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 2881 char __user *optval, int __user *optlen) 2882{ 2883 if (level != SOL_TCP) 2884 return inet_csk_compat_getsockopt(sk, level, optname, 2885 optval, optlen); 2886 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2887} 2888EXPORT_SYMBOL(compat_tcp_getsockopt); 2889#endif 2890 2891struct sk_buff *tcp_tso_segment(struct sk_buff *skb, 2892 netdev_features_t features) 2893{ 2894 struct sk_buff *segs = ERR_PTR(-EINVAL); 2895 struct tcphdr *th; 2896 unsigned int thlen; 2897 unsigned int seq; 2898 __be32 delta; 2899 unsigned int oldlen; 2900 unsigned int mss; 2901 struct sk_buff *gso_skb = skb; 2902 __sum16 newcheck; 2903 bool ooo_okay, copy_destructor; 2904 2905 if (!pskb_may_pull(skb, sizeof(*th))) 2906 goto out; 2907 2908 th = tcp_hdr(skb); 2909 thlen = th->doff * 4; 2910 if (thlen < sizeof(*th)) 2911 goto out; 2912 2913 if (!pskb_may_pull(skb, thlen)) 2914 goto out; 2915 2916 oldlen = (u16)~skb->len; 2917 __skb_pull(skb, thlen); 2918 2919 mss = skb_shinfo(skb)->gso_size; 2920 if (unlikely(skb->len <= mss)) 2921 goto out; 2922 2923 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { 2924 /* Packet is from an untrusted source, reset gso_segs. */ 2925 int type = skb_shinfo(skb)->gso_type; 2926 2927 if (unlikely(type & 2928 ~(SKB_GSO_TCPV4 | 2929 SKB_GSO_DODGY | 2930 SKB_GSO_TCP_ECN | 2931 SKB_GSO_TCPV6 | 2932 SKB_GSO_GRE | 2933 SKB_GSO_UDP_TUNNEL | 2934 0) || 2935 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) 2936 goto out; 2937 2938 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); 2939 2940 segs = NULL; 2941 goto out; 2942 } 2943 2944 copy_destructor = gso_skb->destructor == tcp_wfree; 2945 ooo_okay = gso_skb->ooo_okay; 2946 /* All segments but the first should have ooo_okay cleared */ 2947 skb->ooo_okay = 0; 2948 2949 segs = skb_segment(skb, features); 2950 if (IS_ERR(segs)) 2951 goto out; 2952 2953 /* Only first segment might have ooo_okay set */ 2954 segs->ooo_okay = ooo_okay; 2955 2956 delta = htonl(oldlen + (thlen + mss)); 2957 2958 skb = segs; 2959 th = tcp_hdr(skb); 2960 seq = ntohl(th->seq); 2961 2962 newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + 2963 (__force u32)delta)); 2964 2965 do { 2966 th->fin = th->psh = 0; 2967 th->check = newcheck; 2968 2969 if (skb->ip_summed != CHECKSUM_PARTIAL) 2970 th->check = 2971 csum_fold(csum_partial(skb_transport_header(skb), 2972 thlen, skb->csum)); 2973 2974 seq += mss; 2975 if (copy_destructor) { 2976 skb->destructor = gso_skb->destructor; 2977 skb->sk = gso_skb->sk; 2978 /* {tcp|sock}_wfree() use exact truesize accounting : 2979 * sum(skb->truesize) MUST be exactly be gso_skb->truesize 2980 * So we account mss bytes of 'true size' for each segment. 2981 * The last segment will contain the remaining. 
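			 * For instance, with three segments the first two are
			 * charged mss bytes of truesize each, and the swap()
			 * after this loop leaves the third one with the
			 * original gso_skb->truesize minus 2 * mss.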
2982 */ 2983 skb->truesize = mss; 2984 gso_skb->truesize -= mss; 2985 } 2986 skb = skb->next; 2987 th = tcp_hdr(skb); 2988 2989 th->seq = htonl(seq); 2990 th->cwr = 0; 2991 } while (skb->next); 2992 2993 /* Following permits TCP Small Queues to work well with GSO : 2994 * The callback to TCP stack will be called at the time last frag 2995 * is freed at TX completion, and not right now when gso_skb 2996 * is freed by GSO engine 2997 */ 2998 if (copy_destructor) { 2999 swap(gso_skb->sk, skb->sk); 3000 swap(gso_skb->destructor, skb->destructor); 3001 swap(gso_skb->truesize, skb->truesize); 3002 } 3003 3004 delta = htonl(oldlen + (skb->tail - skb->transport_header) + 3005 skb->data_len); 3006 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 3007 (__force u32)delta)); 3008 if (skb->ip_summed != CHECKSUM_PARTIAL) 3009 th->check = csum_fold(csum_partial(skb_transport_header(skb), 3010 thlen, skb->csum)); 3011 3012out: 3013 return segs; 3014} 3015EXPORT_SYMBOL(tcp_tso_segment); 3016 3017struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3018{ 3019 struct sk_buff **pp = NULL; 3020 struct sk_buff *p; 3021 struct tcphdr *th; 3022 struct tcphdr *th2; 3023 unsigned int len; 3024 unsigned int thlen; 3025 __be32 flags; 3026 unsigned int mss = 1; 3027 unsigned int hlen; 3028 unsigned int off; 3029 int flush = 1; 3030 int i; 3031 3032 off = skb_gro_offset(skb); 3033 hlen = off + sizeof(*th); 3034 th = skb_gro_header_fast(skb, off); 3035 if (skb_gro_header_hard(skb, hlen)) { 3036 th = skb_gro_header_slow(skb, hlen, off); 3037 if (unlikely(!th)) 3038 goto out; 3039 } 3040 3041 thlen = th->doff * 4; 3042 if (thlen < sizeof(*th)) 3043 goto out; 3044 3045 hlen = off + thlen; 3046 if (skb_gro_header_hard(skb, hlen)) { 3047 th = skb_gro_header_slow(skb, hlen, off); 3048 if (unlikely(!th)) 3049 goto out; 3050 } 3051 3052 skb_gro_pull(skb, thlen); 3053 3054 len = skb_gro_len(skb); 3055 flags = tcp_flag_word(th); 3056 3057 for (; (p = *head); head = &p->next) { 3058 if (!NAPI_GRO_CB(p)->same_flow) 3059 continue; 3060 3061 th2 = tcp_hdr(p); 3062 3063 if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { 3064 NAPI_GRO_CB(p)->same_flow = 0; 3065 continue; 3066 } 3067 3068 goto found; 3069 } 3070 3071 goto out_check_final; 3072 3073found: 3074 flush = NAPI_GRO_CB(p)->flush; 3075 flush |= (__force int)(flags & TCP_FLAG_CWR); 3076 flush |= (__force int)((flags ^ tcp_flag_word(th2)) & 3077 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); 3078 flush |= (__force int)(th->ack_seq ^ th2->ack_seq); 3079 for (i = sizeof(*th); i < thlen; i += 4) 3080 flush |= *(u32 *)((u8 *)th + i) ^ 3081 *(u32 *)((u8 *)th2 + i); 3082 3083 mss = skb_shinfo(p)->gso_size; 3084 3085 flush |= (len - 1) >= mss; 3086 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); 3087 3088 if (flush || skb_gro_receive(head, skb)) { 3089 mss = 1; 3090 goto out_check_final; 3091 } 3092 3093 p = *head; 3094 th2 = tcp_hdr(p); 3095 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); 3096 3097out_check_final: 3098 flush = len < mss; 3099 flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | 3100 TCP_FLAG_RST | TCP_FLAG_SYN | 3101 TCP_FLAG_FIN)); 3102 3103 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) 3104 pp = head; 3105 3106out: 3107 NAPI_GRO_CB(skb)->flush |= flush; 3108 3109 return pp; 3110} 3111EXPORT_SYMBOL(tcp_gro_receive); 3112 3113int tcp_gro_complete(struct sk_buff *skb) 3114{ 3115 struct tcphdr *th = tcp_hdr(skb); 3116 3117 skb->csum_start = skb_transport_header(skb) - skb->head; 3118 
skb->csum_offset = offsetof(struct tcphdr, check); 3119 skb->ip_summed = CHECKSUM_PARTIAL; 3120 3121 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 3122 3123 if (th->cwr) 3124 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 3125 3126 return 0; 3127} 3128EXPORT_SYMBOL(tcp_gro_complete); 3129 3130#ifdef CONFIG_TCP_MD5SIG 3131static unsigned long tcp_md5sig_users; 3132static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool; 3133static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 3134 3135static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) 3136{ 3137 int cpu; 3138 3139 for_each_possible_cpu(cpu) { 3140 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); 3141 3142 if (p->md5_desc.tfm) 3143 crypto_free_hash(p->md5_desc.tfm); 3144 } 3145 free_percpu(pool); 3146} 3147 3148void tcp_free_md5sig_pool(void) 3149{ 3150 struct tcp_md5sig_pool __percpu *pool = NULL; 3151 3152 spin_lock_bh(&tcp_md5sig_pool_lock); 3153 if (--tcp_md5sig_users == 0) { 3154 pool = tcp_md5sig_pool; 3155 tcp_md5sig_pool = NULL; 3156 } 3157 spin_unlock_bh(&tcp_md5sig_pool_lock); 3158 if (pool) 3159 __tcp_free_md5sig_pool(pool); 3160} 3161EXPORT_SYMBOL(tcp_free_md5sig_pool); 3162 3163static struct tcp_md5sig_pool __percpu * 3164__tcp_alloc_md5sig_pool(struct sock *sk) 3165{ 3166 int cpu; 3167 struct tcp_md5sig_pool __percpu *pool; 3168 3169 pool = alloc_percpu(struct tcp_md5sig_pool); 3170 if (!pool) 3171 return NULL; 3172 3173 for_each_possible_cpu(cpu) { 3174 struct crypto_hash *hash; 3175 3176 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 3177 if (IS_ERR_OR_NULL(hash)) 3178 goto out_free; 3179 3180 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; 3181 } 3182 return pool; 3183out_free: 3184 __tcp_free_md5sig_pool(pool); 3185 return NULL; 3186} 3187 3188struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 3189{ 3190 struct tcp_md5sig_pool __percpu *pool; 3191 bool alloc = false; 3192 3193retry: 3194 spin_lock_bh(&tcp_md5sig_pool_lock); 3195 pool = tcp_md5sig_pool; 3196 if (tcp_md5sig_users++ == 0) { 3197 alloc = true; 3198 spin_unlock_bh(&tcp_md5sig_pool_lock); 3199 } else if (!pool) { 3200 tcp_md5sig_users--; 3201 spin_unlock_bh(&tcp_md5sig_pool_lock); 3202 cpu_relax(); 3203 goto retry; 3204 } else 3205 spin_unlock_bh(&tcp_md5sig_pool_lock); 3206 3207 if (alloc) { 3208 /* we cannot hold spinlock here because this may sleep. */ 3209 struct tcp_md5sig_pool __percpu *p; 3210 3211 p = __tcp_alloc_md5sig_pool(sk); 3212 spin_lock_bh(&tcp_md5sig_pool_lock); 3213 if (!p) { 3214 tcp_md5sig_users--; 3215 spin_unlock_bh(&tcp_md5sig_pool_lock); 3216 return NULL; 3217 } 3218 pool = tcp_md5sig_pool; 3219 if (pool) { 3220 /* oops, it has already been assigned. */ 3221 spin_unlock_bh(&tcp_md5sig_pool_lock); 3222 __tcp_free_md5sig_pool(p); 3223 } else { 3224 tcp_md5sig_pool = pool = p; 3225 spin_unlock_bh(&tcp_md5sig_pool_lock); 3226 } 3227 } 3228 return pool; 3229} 3230EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 3231 3232 3233/** 3234 * tcp_get_md5sig_pool - get md5sig_pool for this user 3235 * 3236 * We use percpu structure, so if we succeed, we exit with preemption 3237 * and BH disabled, to make sure another thread or softirq handling 3238 * wont try to get same context. 
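 *
 * Callers must pair this with tcp_put_md5sig_pool(), which re-enables BH
 * and drops the user reference.  A typical in-kernel pattern is:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (!hp)
 *		return 1;
 *	... feed the header, skb data and key via tcp_md5_hash_header(),
 *	tcp_md5_hash_skb_data() and tcp_md5_hash_key() below ...
 *	tcp_put_md5sig_pool();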
3239 */ 3240struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 3241{ 3242 struct tcp_md5sig_pool __percpu *p; 3243 3244 local_bh_disable(); 3245 3246 spin_lock(&tcp_md5sig_pool_lock); 3247 p = tcp_md5sig_pool; 3248 if (p) 3249 tcp_md5sig_users++; 3250 spin_unlock(&tcp_md5sig_pool_lock); 3251 3252 if (p) 3253 return this_cpu_ptr(p); 3254 3255 local_bh_enable(); 3256 return NULL; 3257} 3258EXPORT_SYMBOL(tcp_get_md5sig_pool); 3259 3260void tcp_put_md5sig_pool(void) 3261{ 3262 local_bh_enable(); 3263 tcp_free_md5sig_pool(); 3264} 3265EXPORT_SYMBOL(tcp_put_md5sig_pool); 3266 3267int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, 3268 const struct tcphdr *th) 3269{ 3270 struct scatterlist sg; 3271 struct tcphdr hdr; 3272 int err; 3273 3274 /* We are not allowed to change tcphdr, make a local copy */ 3275 memcpy(&hdr, th, sizeof(hdr)); 3276 hdr.check = 0; 3277 3278 /* options aren't included in the hash */ 3279 sg_init_one(&sg, &hdr, sizeof(hdr)); 3280 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr)); 3281 return err; 3282} 3283EXPORT_SYMBOL(tcp_md5_hash_header); 3284 3285int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3286 const struct sk_buff *skb, unsigned int header_len) 3287{ 3288 struct scatterlist sg; 3289 const struct tcphdr *tp = tcp_hdr(skb); 3290 struct hash_desc *desc = &hp->md5_desc; 3291 unsigned int i; 3292 const unsigned int head_data_len = skb_headlen(skb) > header_len ? 3293 skb_headlen(skb) - header_len : 0; 3294 const struct skb_shared_info *shi = skb_shinfo(skb); 3295 struct sk_buff *frag_iter; 3296 3297 sg_init_table(&sg, 1); 3298 3299 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 3300 if (crypto_hash_update(desc, &sg, head_data_len)) 3301 return 1; 3302 3303 for (i = 0; i < shi->nr_frags; ++i) { 3304 const struct skb_frag_struct *f = &shi->frags[i]; 3305 unsigned int offset = f->page_offset; 3306 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 3307 3308 sg_set_page(&sg, page, skb_frag_size(f), 3309 offset_in_page(offset)); 3310 if (crypto_hash_update(desc, &sg, skb_frag_size(f))) 3311 return 1; 3312 } 3313 3314 skb_walk_frags(skb, frag_iter) 3315 if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 3316 return 1; 3317 3318 return 0; 3319} 3320EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3321 3322int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 3323{ 3324 struct scatterlist sg; 3325 3326 sg_init_one(&sg, key->key, key->keylen); 3327 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); 3328} 3329EXPORT_SYMBOL(tcp_md5_hash_key); 3330 3331#endif 3332 3333void tcp_done(struct sock *sk) 3334{ 3335 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; 3336 3337 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3338 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 3339 3340 tcp_set_state(sk, TCP_CLOSE); 3341 tcp_clear_xmit_timers(sk); 3342 if (req != NULL) 3343 reqsk_fastopen_remove(sk, req, false); 3344 3345 sk->sk_shutdown = SHUTDOWN_MASK; 3346 3347 if (!sock_flag(sk, SOCK_DEAD)) 3348 sk->sk_state_change(sk); 3349 else 3350 inet_csk_destroy_sock(sk); 3351} 3352EXPORT_SYMBOL_GPL(tcp_done); 3353 3354extern struct tcp_congestion_ops tcp_reno; 3355 3356static __initdata unsigned long thash_entries; 3357static int __init set_thash_entries(char *str) 3358{ 3359 ssize_t ret; 3360 3361 if (!str) 3362 return 0; 3363 3364 ret = kstrtoul(str, 0, &thash_entries); 3365 if (ret) 3366 return 0; 3367 3368 return 1; 3369} 3370__setup("thash_entries=", set_thash_entries); 3371 3372void tcp_init_mem(struct net 
*net) 3373{ 3374 unsigned long limit = nr_free_buffer_pages() / 8; 3375 limit = max(limit, 128UL); 3376 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; 3377 net->ipv4.sysctl_tcp_mem[1] = limit; 3378 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; 3379} 3380 3381void __init tcp_init(void) 3382{ 3383 struct sk_buff *skb = NULL; 3384 unsigned long limit; 3385 int max_rshare, max_wshare, cnt; 3386 unsigned int i; 3387 3388 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3389 3390 percpu_counter_init(&tcp_sockets_allocated, 0); 3391 percpu_counter_init(&tcp_orphan_count, 0); 3392 tcp_hashinfo.bind_bucket_cachep = 3393 kmem_cache_create("tcp_bind_bucket", 3394 sizeof(struct inet_bind_bucket), 0, 3395 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3396 3397 /* Size and allocate the main established and bind bucket 3398 * hash tables. 3399 * 3400 * The methodology is similar to that of the buffer cache. 3401 */ 3402 tcp_hashinfo.ehash = 3403 alloc_large_system_hash("TCP established", 3404 sizeof(struct inet_ehash_bucket), 3405 thash_entries, 3406 17, /* one slot per 128 KB of memory */ 3407 0, 3408 NULL, 3409 &tcp_hashinfo.ehash_mask, 3410 0, 3411 thash_entries ? 0 : 512 * 1024); 3412 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { 3413 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 3414 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); 3415 } 3416 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 3417 panic("TCP: failed to alloc ehash_locks"); 3418 tcp_hashinfo.bhash = 3419 alloc_large_system_hash("TCP bind", 3420 sizeof(struct inet_bind_hashbucket), 3421 tcp_hashinfo.ehash_mask + 1, 3422 17, /* one slot per 128 KB of memory */ 3423 0, 3424 &tcp_hashinfo.bhash_size, 3425 NULL, 3426 0, 3427 64 * 1024); 3428 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 3429 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3430 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 3431 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3432 } 3433 3434 3435 cnt = tcp_hashinfo.ehash_mask + 1; 3436 3437 tcp_death_row.sysctl_max_tw_buckets = cnt / 2; 3438 sysctl_tcp_max_orphans = cnt / 2; 3439 sysctl_max_syn_backlog = max(128, cnt / 256); 3440 3441 tcp_init_mem(&init_net); 3442 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3443 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 3444 max_wshare = min(4UL*1024*1024, limit); 3445 max_rshare = min(6UL*1024*1024, limit); 3446 3447 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3448 sysctl_tcp_wmem[1] = 16*1024; 3449 sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 3450 3451 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3452 sysctl_tcp_rmem[1] = 87380; 3453 sysctl_tcp_rmem[2] = max(87380, max_rshare); 3454 3455 pr_info("Hash tables configured (established %u bind %u)\n", 3456 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3457 3458 tcp_metrics_init(); 3459 3460 tcp_register_congestion_control(&tcp_reno); 3461 3462 tcp_tasklet_init(); 3463} 3464 3465static int tcp_is_local(struct net *net, __be32 addr) { 3466 struct rtable *rt; 3467 struct flowi4 fl4 = { .daddr = addr }; 3468 rt = ip_route_output_key(net, &fl4); 3469 if (IS_ERR_OR_NULL(rt)) 3470 return 0; 3471 return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK); 3472} 3473 3474#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3475static int tcp_is_local6(struct net *net, struct in6_addr *addr) { 3476 struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0); 3477 return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK); 3478} 3479#endif 3480 3481/* 3482 * 
tcp_nuke_addr - destroy all sockets on the given local address
 * If the local address is the unspecified address (0.0.0.0 or ::), destroy
 * all sockets with local addresses that are not configured.
 */
int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
{
	int family = addr->sa_family;
	unsigned int bucket;

	struct in_addr *in;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_addr *in6;
#endif
	if (family == AF_INET) {
		in = &((struct sockaddr_in *)addr)->sin_addr;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	} else if (family == AF_INET6) {
		in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
#endif
	} else {
		return -EAFNOSUPPORT;
	}

	for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
		struct hlist_nulls_node *node;
		struct sock *sk;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);

restart:
		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
				continue;
			if (sock_flag(sk, SOCK_DEAD))
				continue;

			if (family == AF_INET) {
				__be32 s4 = inet->inet_rcv_saddr;
				if (s4 == LOOPBACK4_IPV6)
					continue;

				if (in->s_addr != s4 &&
				    !(in->s_addr == INADDR_ANY &&
				      !tcp_is_local(net, s4)))
					continue;
			}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			if (family == AF_INET6) {
				struct in6_addr *s6;
				if (!inet->pinet6)
					continue;

				s6 = &inet->pinet6->rcv_saddr;
				if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
					continue;

				if (!ipv6_addr_equal(in6, s6) &&
				    !(ipv6_addr_equal(in6, &in6addr_any) &&
				      !tcp_is_local6(net, s6)))
					continue;
			}
#endif

			sock_hold(sk);
			spin_unlock_bh(lock);

			lock_sock(sk);
			// TODO:
			// Check for SOCK_DEAD again, it could have changed.
			// Add a write barrier, see tcp_reset().
			local_bh_disable();
			sk->sk_err = ETIMEDOUT;
			sk->sk_error_report(sk);

			tcp_done(sk);
			local_bh_enable();
			release_sock(sk);
			sock_put(sk);

			goto restart;
		}
		spin_unlock_bh(lock);
	}

	return 0;
}
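
/*
 * Example (illustrative only): to tear down every established TCP socket
 * bound to 192.0.2.1, a caller could do
 *
 *	struct sockaddr_in sin = { .sin_family = AF_INET };
 *
 *	sin.sin_addr.s_addr = in_aton("192.0.2.1");
 *	tcp_nuke_addr(&init_net, (struct sockaddr *)&sin);
 *
 * Passing the unspecified address (INADDR_ANY / in6addr_any) instead
 * destroys sockets whose local address is no longer configured, as
 * described in the comment above tcp_nuke_addr().
 */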