tcp.c revision ec0a196626bd12e0ba108d7daa6d95a4fb25c2c5
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 14 * Florian La Roche, <flla@stud.uni-sb.de> 15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 16 * Linus Torvalds, <torvalds@cs.helsinki.fi> 17 * Alan Cox, <gw4pts@gw4pts.ampr.org> 18 * Matthew Dillon, <dillon@apollo.west.oic.com> 19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 20 * Jorge Cwik, <jorge@laser.satlink.net> 21 * 22 * Fixes: 23 * Alan Cox : Numerous verify_area() calls 24 * Alan Cox : Set the ACK bit on a reset 25 * Alan Cox : Stopped it crashing if it closed while 26 * sk->inuse=1 and was trying to connect 27 * (tcp_err()). 28 * Alan Cox : All icmp error handling was broken 29 * pointers passed where wrong and the 30 * socket was looked up backwards. Nobody 31 * tested any icmp error code obviously. 32 * Alan Cox : tcp_err() now handled properly. It 33 * wakes people on errors. poll 34 * behaves and the icmp error race 35 * has gone by moving it into sock.c 36 * Alan Cox : tcp_send_reset() fixed to work for 37 * everything not just packets for 38 * unknown sockets. 39 * Alan Cox : tcp option processing. 40 * Alan Cox : Reset tweaked (still not 100%) [Had 41 * syn rule wrong] 42 * Herp Rosmanith : More reset fixes 43 * Alan Cox : No longer acks invalid rst frames. 44 * Acking any kind of RST is right out. 45 * Alan Cox : Sets an ignore me flag on an rst 46 * receive otherwise odd bits of prattle 47 * escape still 48 * Alan Cox : Fixed another acking RST frame bug. 49 * Should stop LAN workplace lockups. 50 * Alan Cox : Some tidyups using the new skb list 51 * facilities 52 * Alan Cox : sk->keepopen now seems to work 53 * Alan Cox : Pulls options out correctly on accepts 54 * Alan Cox : Fixed assorted sk->rqueue->next errors 55 * Alan Cox : PSH doesn't end a TCP read. Switched a 56 * bit to skb ops. 57 * Alan Cox : Tidied tcp_data to avoid a potential 58 * nasty. 59 * Alan Cox : Added some better commenting, as the 60 * tcp is hard to follow 61 * Alan Cox : Removed incorrect check for 20 * psh 62 * Michael O'Reilly : ack < copied bug fix. 63 * Johannes Stille : Misc tcp fixes (not all in yet). 64 * Alan Cox : FIN with no memory -> CRASH 65 * Alan Cox : Added socket option proto entries. 66 * Also added awareness of them to accept. 67 * Alan Cox : Added TCP options (SOL_TCP) 68 * Alan Cox : Switched wakeup calls to callbacks, 69 * so the kernel can layer network 70 * sockets. 71 * Alan Cox : Use ip_tos/ip_ttl settings. 72 * Alan Cox : Handle FIN (more) properly (we hope). 73 * Alan Cox : RST frames sent on unsynchronised 74 * state ack error. 75 * Alan Cox : Put in missing check for SYN bit. 76 * Alan Cox : Added tcp_select_window() aka NET2E 77 * window non shrink trick. 
78 * Alan Cox : Added a couple of small NET2E timer 79 * fixes 80 * Charles Hedrick : TCP fixes 81 * Toomas Tamm : TCP window fixes 82 * Alan Cox : Small URG fix to rlogin ^C ack fight 83 * Charles Hedrick : Rewrote most of it to actually work 84 * Linus : Rewrote tcp_read() and URG handling 85 * completely 86 * Gerhard Koerting: Fixed some missing timer handling 87 * Matthew Dillon : Reworked TCP machine states as per RFC 88 * Gerhard Koerting: PC/TCP workarounds 89 * Adam Caldwell : Assorted timer/timing errors 90 * Matthew Dillon : Fixed another RST bug 91 * Alan Cox : Move to kernel side addressing changes. 92 * Alan Cox : Beginning work on TCP fastpathing 93 * (not yet usable) 94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 95 * Alan Cox : TCP fast path debugging 96 * Alan Cox : Window clamping 97 * Michael Riepe : Bug in tcp_check() 98 * Matt Dillon : More TCP improvements and RST bug fixes 99 * Matt Dillon : Yet more small nasties remove from the 100 * TCP code (Be very nice to this man if 101 * tcp finally works 100%) 8) 102 * Alan Cox : BSD accept semantics. 103 * Alan Cox : Reset on closedown bug. 104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 105 * Michael Pall : Handle poll() after URG properly in 106 * all cases. 107 * Michael Pall : Undo the last fix in tcp_read_urg() 108 * (multi URG PUSH broke rlogin). 109 * Michael Pall : Fix the multi URG PUSH problem in 110 * tcp_readable(), poll() after URG 111 * works now. 112 * Michael Pall : recv(...,MSG_OOB) never blocks in the 113 * BSD api. 114 * Alan Cox : Changed the semantics of sk->socket to 115 * fix a race and a signal problem with 116 * accept() and async I/O. 117 * Alan Cox : Relaxed the rules on tcp_sendto(). 118 * Yury Shevchuk : Really fixed accept() blocking problem. 119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 120 * clients/servers which listen in on 121 * fixed ports. 122 * Alan Cox : Cleaned the above up and shrank it to 123 * a sensible code size. 124 * Alan Cox : Self connect lockup fix. 125 * Alan Cox : No connect to multicast. 126 * Ross Biro : Close unaccepted children on master 127 * socket close. 128 * Alan Cox : Reset tracing code. 129 * Alan Cox : Spurious resets on shutdown. 130 * Alan Cox : Giant 15 minute/60 second timer error 131 * Alan Cox : Small whoops in polling before an 132 * accept. 133 * Alan Cox : Kept the state trace facility since 134 * it's handy for debugging. 135 * Alan Cox : More reset handler fixes. 136 * Alan Cox : Started rewriting the code based on 137 * the RFC's for other useful protocol 138 * references see: Comer, KA9Q NOS, and 139 * for a reference on the difference 140 * between specifications and how BSD 141 * works see the 4.4lite source. 142 * A.N.Kuznetsov : Don't time wait on completion of tidy 143 * close. 144 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 145 * Linus Torvalds : Fixed BSD port reuse to work first syn 146 * Alan Cox : Reimplemented timers as per the RFC 147 * and using multiple timers for sanity. 148 * Alan Cox : Small bug fixes, and a lot of new 149 * comments. 150 * Alan Cox : Fixed dual reader crash by locking 151 * the buffers (much like datagram.c) 152 * Alan Cox : Fixed stuck sockets in probe. A probe 153 * now gets fed up of retrying without 154 * (even a no space) answer. 155 * Alan Cox : Extracted closing code better 156 * Alan Cox : Fixed the closing state machine to 157 * resemble the RFC. 158 * Alan Cox : More 'per spec' fixes. 159 * Jorge Cwik : Even faster checksumming. 
160 * Alan Cox : tcp_data() doesn't ack illegal PSH 161 * only frames. At least one pc tcp stack 162 * generates them. 163 * Alan Cox : Cache last socket. 164 * Alan Cox : Per route irtt. 165 * Matt Day : poll()->select() match BSD precisely on error 166 * Alan Cox : New buffers 167 * Marc Tamsky : Various sk->prot->retransmits and 168 * sk->retransmits misupdating fixed. 169 * Fixed tcp_write_timeout: stuck close, 170 * and TCP syn retries gets used now. 171 * Mark Yarvis : In tcp_read_wakeup(), don't send an 172 * ack if state is TCP_CLOSED. 173 * Alan Cox : Look up device on a retransmit - routes may 174 * change. Doesn't yet cope with MSS shrink right 175 * but it's a start! 176 * Marc Tamsky : Closing in closing fixes. 177 * Mike Shaver : RFC1122 verifications. 178 * Alan Cox : rcv_saddr errors. 179 * Alan Cox : Block double connect(). 180 * Alan Cox : Small hooks for enSKIP. 181 * Alexey Kuznetsov: Path MTU discovery. 182 * Alan Cox : Support soft errors. 183 * Alan Cox : Fix MTU discovery pathological case 184 * when the remote claims no mtu! 185 * Marc Tamsky : TCP_CLOSE fix. 186 * Colin (G3TNE) : Send a reset on syn ack replies in 187 * window but wrong (fixes NT lpd problems) 188 * Pedro Roque : Better TCP window handling, delayed ack. 189 * Joerg Reuter : No modification of locked buffers in 190 * tcp_do_retransmit() 191 * Eric Schenk : Changed receiver side silly window 192 * avoidance algorithm to BSD style 193 * algorithm. This doubles throughput 194 * against machines running Solaris, 195 * and seems to result in general 196 * improvement. 197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 198 * Willy Konynenberg : Transparent proxying support. 199 * Mike McLagan : Routing by source 200 * Keith Owens : Do proper merging with partial SKB's in 201 * tcp_do_sendmsg to avoid burstiness. 202 * Eric Schenk : Fix fast close down bug with 203 * shutdown() followed by close(). 204 * Andi Kleen : Make poll agree with SIGIO 205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 206 * lingertime == 0 (RFC 793 ABORT Call) 207 * Hirokazu Takahashi : Use copy_from_user() instead of 208 * csum_and_copy_from_user() if possible. 209 * 210 * This program is free software; you can redistribute it and/or 211 * modify it under the terms of the GNU General Public License 212 * as published by the Free Software Foundation; either version 213 * 2 of the License, or(at your option) any later version. 214 * 215 * Description of States: 216 * 217 * TCP_SYN_SENT sent a connection request, waiting for ack 218 * 219 * TCP_SYN_RECV received a connection request, sent ack, 220 * waiting for final ack in three-way handshake. 221 * 222 * TCP_ESTABLISHED connection established 223 * 224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 225 * transmission of remaining buffered data 226 * 227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 228 * to shutdown 229 * 230 * TCP_CLOSING both sides have shutdown but we still have 231 * data we have to finish sending 232 * 233 * TCP_TIME_WAIT timeout to catch resent junk before entering 234 * closed, can only be entered from FIN_WAIT2 235 * or CLOSING. 
Required because the other end 236 * may not have gotten our last ACK causing it 237 * to retransmit the data packet (which we ignore) 238 * 239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for 240 * us to finish writing our data and to shutdown 241 * (we have to close() to move on to LAST_ACK) 242 * 243 * TCP_LAST_ACK out side has shutdown after remote has 244 * shutdown. There may still be data in our 245 * buffer that we have to finish sending 246 * 247 * TCP_CLOSE socket is finished 248 */ 249 250#include <linux/kernel.h> 251#include <linux/module.h> 252#include <linux/types.h> 253#include <linux/fcntl.h> 254#include <linux/poll.h> 255#include <linux/init.h> 256#include <linux/fs.h> 257#include <linux/skbuff.h> 258#include <linux/splice.h> 259#include <linux/net.h> 260#include <linux/socket.h> 261#include <linux/random.h> 262#include <linux/bootmem.h> 263#include <linux/cache.h> 264#include <linux/err.h> 265#include <linux/crypto.h> 266 267#include <net/icmp.h> 268#include <net/tcp.h> 269#include <net/xfrm.h> 270#include <net/ip.h> 271#include <net/netdma.h> 272#include <net/sock.h> 273 274#include <asm/uaccess.h> 275#include <asm/ioctls.h> 276 277int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; 278 279DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly; 280 281atomic_t tcp_orphan_count = ATOMIC_INIT(0); 282 283EXPORT_SYMBOL_GPL(tcp_orphan_count); 284 285int sysctl_tcp_mem[3] __read_mostly; 286int sysctl_tcp_wmem[3] __read_mostly; 287int sysctl_tcp_rmem[3] __read_mostly; 288 289EXPORT_SYMBOL(sysctl_tcp_mem); 290EXPORT_SYMBOL(sysctl_tcp_rmem); 291EXPORT_SYMBOL(sysctl_tcp_wmem); 292 293atomic_t tcp_memory_allocated; /* Current allocated memory. */ 294atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */ 295 296EXPORT_SYMBOL(tcp_memory_allocated); 297EXPORT_SYMBOL(tcp_sockets_allocated); 298 299/* 300 * TCP splice context 301 */ 302struct tcp_splice_state { 303 struct pipe_inode_info *pipe; 304 size_t len; 305 unsigned int flags; 306}; 307 308/* 309 * Pressure flag: try to collapse. 310 * Technical note: it is used by multiple contexts non atomically. 311 * All the __sk_mem_schedule() is of this nature: accounting 312 * is strict, actions are advisory and have some latency. 313 */ 314int tcp_memory_pressure __read_mostly; 315 316EXPORT_SYMBOL(tcp_memory_pressure); 317 318void tcp_enter_memory_pressure(void) 319{ 320 if (!tcp_memory_pressure) { 321 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES); 322 tcp_memory_pressure = 1; 323 } 324} 325 326EXPORT_SYMBOL(tcp_enter_memory_pressure); 327 328/* 329 * Wait for a TCP event. 330 * 331 * Note that we don't need to lock the socket, as the upper poll layers 332 * take care of normal races (between the test and the event) and we don't 333 * go look at any of the socket buffers directly. 334 */ 335unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 336{ 337 unsigned int mask; 338 struct sock *sk = sock->sk; 339 struct tcp_sock *tp = tcp_sk(sk); 340 341 poll_wait(file, sk->sk_sleep, wait); 342 if (sk->sk_state == TCP_LISTEN) 343 return inet_csk_listen_poll(sk); 344 345 /* Socket is not locked. We are protected from async events 346 by poll logic and correct handling of state changes 347 made by another threads is impossible in any case. 348 */ 349 350 mask = 0; 351 if (sk->sk_err) 352 mask = POLLERR; 353 354 /* 355 * POLLHUP is certainly not done right. But poll() doesn't 356 * have a notion of HUP in just one direction, and for a 357 * socket the read side is more interesting. 
358 * 359 * Some poll() documentation says that POLLHUP is incompatible 360 * with the POLLOUT/POLLWR flags, so somebody should check this 361 * all. But careful, it tends to be safer to return too many 362 * bits than too few, and you can easily break real applications 363 * if you don't tell them that something has hung up! 364 * 365 * Check-me. 366 * 367 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and 368 * our fs/select.c). It means that after we received EOF, 369 * poll always returns immediately, making impossible poll() on write() 370 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP 371 * if and only if shutdown has been made in both directions. 372 * Actually, it is interesting to look how Solaris and DUX 373 * solve this dilemma. I would prefer, if PULLHUP were maskable, 374 * then we could set it on SND_SHUTDOWN. BTW examples given 375 * in Stevens' books assume exactly this behaviour, it explains 376 * why PULLHUP is incompatible with POLLOUT. --ANK 377 * 378 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 379 * blocking on fresh not-connected or disconnected socket. --ANK 380 */ 381 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 382 mask |= POLLHUP; 383 if (sk->sk_shutdown & RCV_SHUTDOWN) 384 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 385 386 /* Connected? */ 387 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { 388 /* Potential race condition. If read of tp below will 389 * escape above sk->sk_state, we can be illegally awaken 390 * in SYN_* states. */ 391 if ((tp->rcv_nxt != tp->copied_seq) && 392 (tp->urg_seq != tp->copied_seq || 393 tp->rcv_nxt != tp->copied_seq + 1 || 394 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data)) 395 mask |= POLLIN | POLLRDNORM; 396 397 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 398 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 399 mask |= POLLOUT | POLLWRNORM; 400 } else { /* send SIGIO later */ 401 set_bit(SOCK_ASYNC_NOSPACE, 402 &sk->sk_socket->flags); 403 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 404 405 /* Race breaker. If space is freed after 406 * wspace test but before the flags are set, 407 * IO signal will be lost. 408 */ 409 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 410 mask |= POLLOUT | POLLWRNORM; 411 } 412 } 413 414 if (tp->urg_data & TCP_URG_VALID) 415 mask |= POLLPRI; 416 } 417 return mask; 418} 419 420int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) 421{ 422 struct tcp_sock *tp = tcp_sk(sk); 423 int answ; 424 425 switch (cmd) { 426 case SIOCINQ: 427 if (sk->sk_state == TCP_LISTEN) 428 return -EINVAL; 429 430 lock_sock(sk); 431 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 432 answ = 0; 433 else if (sock_flag(sk, SOCK_URGINLINE) || 434 !tp->urg_data || 435 before(tp->urg_seq, tp->copied_seq) || 436 !before(tp->urg_seq, tp->rcv_nxt)) { 437 answ = tp->rcv_nxt - tp->copied_seq; 438 439 /* Subtract 1, if FIN is in queue. 
*/ 440 if (answ && !skb_queue_empty(&sk->sk_receive_queue)) 441 answ -= 442 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; 443 } else 444 answ = tp->urg_seq - tp->copied_seq; 445 release_sock(sk); 446 break; 447 case SIOCATMARK: 448 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 449 break; 450 case SIOCOUTQ: 451 if (sk->sk_state == TCP_LISTEN) 452 return -EINVAL; 453 454 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 455 answ = 0; 456 else 457 answ = tp->write_seq - tp->snd_una; 458 break; 459 default: 460 return -ENOIOCTLCMD; 461 } 462 463 return put_user(answ, (int __user *)arg); 464} 465 466static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 467{ 468 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 469 tp->pushed_seq = tp->write_seq; 470} 471 472static inline int forced_push(struct tcp_sock *tp) 473{ 474 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 475} 476 477static inline void skb_entail(struct sock *sk, struct sk_buff *skb) 478{ 479 struct tcp_sock *tp = tcp_sk(sk); 480 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 481 482 skb->csum = 0; 483 tcb->seq = tcb->end_seq = tp->write_seq; 484 tcb->flags = TCPCB_FLAG_ACK; 485 tcb->sacked = 0; 486 skb_header_release(skb); 487 tcp_add_write_queue_tail(sk, skb); 488 sk->sk_wmem_queued += skb->truesize; 489 sk_mem_charge(sk, skb->truesize); 490 if (tp->nonagle & TCP_NAGLE_PUSH) 491 tp->nonagle &= ~TCP_NAGLE_PUSH; 492} 493 494static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 495 struct sk_buff *skb) 496{ 497 if (flags & MSG_OOB) { 498 tp->urg_mode = 1; 499 tp->snd_up = tp->write_seq; 500 } 501} 502 503static inline void tcp_push(struct sock *sk, int flags, int mss_now, 504 int nonagle) 505{ 506 struct tcp_sock *tp = tcp_sk(sk); 507 508 if (tcp_send_head(sk)) { 509 struct sk_buff *skb = tcp_write_queue_tail(sk); 510 if (!(flags & MSG_MORE) || forced_push(tp)) 511 tcp_mark_push(tp, skb); 512 tcp_mark_urg(tp, flags, skb); 513 __tcp_push_pending_frames(sk, mss_now, 514 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 515 } 516} 517 518static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 519 unsigned int offset, size_t len) 520{ 521 struct tcp_splice_state *tss = rd_desc->arg.data; 522 523 return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags); 524} 525 526static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 527{ 528 /* Store TCP splice context information in read_descriptor_t. */ 529 read_descriptor_t rd_desc = { 530 .arg.data = tss, 531 }; 532 533 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 534} 535 536/** 537 * tcp_splice_read - splice data from TCP socket to a pipe 538 * @sock: socket to splice from 539 * @ppos: position (not valid) 540 * @pipe: pipe to splice to 541 * @len: number of bytes to splice 542 * @flags: splice modifier flags 543 * 544 * Description: 545 * Will read pages from given socket and fill them into a pipe. 
546 * 547 **/ 548ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 549 struct pipe_inode_info *pipe, size_t len, 550 unsigned int flags) 551{ 552 struct sock *sk = sock->sk; 553 struct tcp_splice_state tss = { 554 .pipe = pipe, 555 .len = len, 556 .flags = flags, 557 }; 558 long timeo; 559 ssize_t spliced; 560 int ret; 561 562 /* 563 * We can't seek on a socket input 564 */ 565 if (unlikely(*ppos)) 566 return -ESPIPE; 567 568 ret = spliced = 0; 569 570 lock_sock(sk); 571 572 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); 573 while (tss.len) { 574 ret = __tcp_splice_read(sk, &tss); 575 if (ret < 0) 576 break; 577 else if (!ret) { 578 if (spliced) 579 break; 580 if (flags & SPLICE_F_NONBLOCK) { 581 ret = -EAGAIN; 582 break; 583 } 584 if (sock_flag(sk, SOCK_DONE)) 585 break; 586 if (sk->sk_err) { 587 ret = sock_error(sk); 588 break; 589 } 590 if (sk->sk_shutdown & RCV_SHUTDOWN) 591 break; 592 if (sk->sk_state == TCP_CLOSE) { 593 /* 594 * This occurs when user tries to read 595 * from never connected socket. 596 */ 597 if (!sock_flag(sk, SOCK_DONE)) 598 ret = -ENOTCONN; 599 break; 600 } 601 if (!timeo) { 602 ret = -EAGAIN; 603 break; 604 } 605 sk_wait_data(sk, &timeo); 606 if (signal_pending(current)) { 607 ret = sock_intr_errno(timeo); 608 break; 609 } 610 continue; 611 } 612 tss.len -= ret; 613 spliced += ret; 614 615 release_sock(sk); 616 lock_sock(sk); 617 618 if (sk->sk_err || sk->sk_state == TCP_CLOSE || 619 (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || 620 signal_pending(current)) 621 break; 622 } 623 624 release_sock(sk); 625 626 if (spliced) 627 return spliced; 628 629 return ret; 630} 631 632struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) 633{ 634 struct sk_buff *skb; 635 636 /* The TCP header must be at least 32-bit aligned. */ 637 size = ALIGN(size, 4); 638 639 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 640 if (skb) { 641 if (sk_wmem_schedule(sk, skb->truesize)) { 642 /* 643 * Make sure that we have exactly size bytes 644 * available to the caller, no more, no less. 645 */ 646 skb_reserve(skb, skb_tailroom(skb) - size); 647 return skb; 648 } 649 __kfree_skb(skb); 650 } else { 651 sk->sk_prot->enter_memory_pressure(); 652 sk_stream_moderate_sndbuf(sk); 653 } 654 return NULL; 655} 656 657static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, 658 size_t psize, int flags) 659{ 660 struct tcp_sock *tp = tcp_sk(sk); 661 int mss_now, size_goal; 662 int err; 663 ssize_t copied; 664 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 665 666 /* Wait for a connection to finish. 
*/ 667 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 668 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 669 goto out_err; 670 671 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 672 673 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 674 size_goal = tp->xmit_size_goal; 675 copied = 0; 676 677 err = -EPIPE; 678 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 679 goto do_error; 680 681 while (psize > 0) { 682 struct sk_buff *skb = tcp_write_queue_tail(sk); 683 struct page *page = pages[poffset / PAGE_SIZE]; 684 int copy, i, can_coalesce; 685 int offset = poffset % PAGE_SIZE; 686 int size = min_t(size_t, psize, PAGE_SIZE - offset); 687 688 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 689new_segment: 690 if (!sk_stream_memory_free(sk)) 691 goto wait_for_sndbuf; 692 693 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); 694 if (!skb) 695 goto wait_for_memory; 696 697 skb_entail(sk, skb); 698 copy = size_goal; 699 } 700 701 if (copy > size) 702 copy = size; 703 704 i = skb_shinfo(skb)->nr_frags; 705 can_coalesce = skb_can_coalesce(skb, i, page, offset); 706 if (!can_coalesce && i >= MAX_SKB_FRAGS) { 707 tcp_mark_push(tp, skb); 708 goto new_segment; 709 } 710 if (!sk_wmem_schedule(sk, copy)) 711 goto wait_for_memory; 712 713 if (can_coalesce) { 714 skb_shinfo(skb)->frags[i - 1].size += copy; 715 } else { 716 get_page(page); 717 skb_fill_page_desc(skb, i, page, offset, copy); 718 } 719 720 skb->len += copy; 721 skb->data_len += copy; 722 skb->truesize += copy; 723 sk->sk_wmem_queued += copy; 724 sk_mem_charge(sk, copy); 725 skb->ip_summed = CHECKSUM_PARTIAL; 726 tp->write_seq += copy; 727 TCP_SKB_CB(skb)->end_seq += copy; 728 skb_shinfo(skb)->gso_segs = 0; 729 730 if (!copied) 731 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 732 733 copied += copy; 734 poffset += copy; 735 if (!(psize -= copy)) 736 goto out; 737 738 if (skb->len < size_goal || (flags & MSG_OOB)) 739 continue; 740 741 if (forced_push(tp)) { 742 tcp_mark_push(tp, skb); 743 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 744 } else if (skb == tcp_send_head(sk)) 745 tcp_push_one(sk, mss_now); 746 continue; 747 748wait_for_sndbuf: 749 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 750wait_for_memory: 751 if (copied) 752 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 753 754 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 755 goto do_error; 756 757 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 758 size_goal = tp->xmit_size_goal; 759 } 760 761out: 762 if (copied) 763 tcp_push(sk, flags, mss_now, tp->nonagle); 764 return copied; 765 766do_error: 767 if (copied) 768 goto out; 769out_err: 770 return sk_stream_error(sk, flags, err); 771} 772 773ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, 774 size_t size, int flags) 775{ 776 ssize_t res; 777 struct sock *sk = sock->sk; 778 779 if (!(sk->sk_route_caps & NETIF_F_SG) || 780 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 781 return sock_no_sendpage(sock, page, offset, size, flags); 782 783 lock_sock(sk); 784 TCP_CHECK_TIMER(sk); 785 res = do_tcp_sendpages(sk, &page, offset, size, flags); 786 TCP_CHECK_TIMER(sk); 787 release_sock(sk); 788 return res; 789} 790 791#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 792#define TCP_OFF(sk) (sk->sk_sndmsg_off) 793 794static inline int select_size(struct sock *sk) 795{ 796 struct tcp_sock *tp = tcp_sk(sk); 797 int tmp = tp->mss_cache; 798 799 if (sk->sk_route_caps & NETIF_F_SG) { 800 if (sk_can_gso(sk)) 801 tmp = 0; 802 else { 803 int pgbreak = 
SKB_MAX_HEAD(MAX_TCP_HEADER); 804 805 if (tmp >= pgbreak && 806 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) 807 tmp = pgbreak; 808 } 809 } 810 811 return tmp; 812} 813 814int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 815 size_t size) 816{ 817 struct sock *sk = sock->sk; 818 struct iovec *iov; 819 struct tcp_sock *tp = tcp_sk(sk); 820 struct sk_buff *skb; 821 int iovlen, flags; 822 int mss_now, size_goal; 823 int err, copied; 824 long timeo; 825 826 lock_sock(sk); 827 TCP_CHECK_TIMER(sk); 828 829 flags = msg->msg_flags; 830 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 831 832 /* Wait for a connection to finish. */ 833 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 834 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 835 goto out_err; 836 837 /* This should be in poll */ 838 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 839 840 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 841 size_goal = tp->xmit_size_goal; 842 843 /* Ok commence sending. */ 844 iovlen = msg->msg_iovlen; 845 iov = msg->msg_iov; 846 copied = 0; 847 848 err = -EPIPE; 849 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 850 goto do_error; 851 852 while (--iovlen >= 0) { 853 int seglen = iov->iov_len; 854 unsigned char __user *from = iov->iov_base; 855 856 iov++; 857 858 while (seglen > 0) { 859 int copy; 860 861 skb = tcp_write_queue_tail(sk); 862 863 if (!tcp_send_head(sk) || 864 (copy = size_goal - skb->len) <= 0) { 865 866new_segment: 867 /* Allocate new segment. If the interface is SG, 868 * allocate skb fitting to single page. 869 */ 870 if (!sk_stream_memory_free(sk)) 871 goto wait_for_sndbuf; 872 873 skb = sk_stream_alloc_skb(sk, select_size(sk), 874 sk->sk_allocation); 875 if (!skb) 876 goto wait_for_memory; 877 878 /* 879 * Check whether we can use HW checksum. 880 */ 881 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 882 skb->ip_summed = CHECKSUM_PARTIAL; 883 884 skb_entail(sk, skb); 885 copy = size_goal; 886 } 887 888 /* Try to append data to the end of skb. */ 889 if (copy > seglen) 890 copy = seglen; 891 892 /* Where to copy to? */ 893 if (skb_tailroom(skb) > 0) { 894 /* We have some space in skb head. Superb! */ 895 if (copy > skb_tailroom(skb)) 896 copy = skb_tailroom(skb); 897 if ((err = skb_add_data(skb, from, copy)) != 0) 898 goto do_fault; 899 } else { 900 int merge = 0; 901 int i = skb_shinfo(skb)->nr_frags; 902 struct page *page = TCP_PAGE(sk); 903 int off = TCP_OFF(sk); 904 905 if (skb_can_coalesce(skb, i, page, off) && 906 off != PAGE_SIZE) { 907 /* We can extend the last page 908 * fragment. */ 909 merge = 1; 910 } else if (i == MAX_SKB_FRAGS || 911 (!i && 912 !(sk->sk_route_caps & NETIF_F_SG))) { 913 /* Need to add new fragment and cannot 914 * do this because interface is non-SG, 915 * or because all the page slots are 916 * busy. */ 917 tcp_mark_push(tp, skb); 918 goto new_segment; 919 } else if (page) { 920 if (off == PAGE_SIZE) { 921 put_page(page); 922 TCP_PAGE(sk) = page = NULL; 923 off = 0; 924 } 925 } else 926 off = 0; 927 928 if (copy > PAGE_SIZE - off) 929 copy = PAGE_SIZE - off; 930 931 if (!sk_wmem_schedule(sk, copy)) 932 goto wait_for_memory; 933 934 if (!page) { 935 /* Allocate new cache page. */ 936 if (!(page = sk_stream_alloc_page(sk))) 937 goto wait_for_memory; 938 } 939 940 /* Time to copy data. We are close to 941 * the end! */ 942 err = skb_copy_to_page(sk, from, skb, page, 943 off, copy); 944 if (err) { 945 /* If this page was new, give it to the 946 * socket so it does not get leaked. 
947 */ 948 if (!TCP_PAGE(sk)) { 949 TCP_PAGE(sk) = page; 950 TCP_OFF(sk) = 0; 951 } 952 goto do_error; 953 } 954 955 /* Update the skb. */ 956 if (merge) { 957 skb_shinfo(skb)->frags[i - 1].size += 958 copy; 959 } else { 960 skb_fill_page_desc(skb, i, page, off, copy); 961 if (TCP_PAGE(sk)) { 962 get_page(page); 963 } else if (off + copy < PAGE_SIZE) { 964 get_page(page); 965 TCP_PAGE(sk) = page; 966 } 967 } 968 969 TCP_OFF(sk) = off + copy; 970 } 971 972 if (!copied) 973 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 974 975 tp->write_seq += copy; 976 TCP_SKB_CB(skb)->end_seq += copy; 977 skb_shinfo(skb)->gso_segs = 0; 978 979 from += copy; 980 copied += copy; 981 if ((seglen -= copy) == 0 && iovlen == 0) 982 goto out; 983 984 if (skb->len < size_goal || (flags & MSG_OOB)) 985 continue; 986 987 if (forced_push(tp)) { 988 tcp_mark_push(tp, skb); 989 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 990 } else if (skb == tcp_send_head(sk)) 991 tcp_push_one(sk, mss_now); 992 continue; 993 994wait_for_sndbuf: 995 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 996wait_for_memory: 997 if (copied) 998 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 999 1000 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1001 goto do_error; 1002 1003 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 1004 size_goal = tp->xmit_size_goal; 1005 } 1006 } 1007 1008out: 1009 if (copied) 1010 tcp_push(sk, flags, mss_now, tp->nonagle); 1011 TCP_CHECK_TIMER(sk); 1012 release_sock(sk); 1013 return copied; 1014 1015do_fault: 1016 if (!skb->len) { 1017 tcp_unlink_write_queue(skb, sk); 1018 /* It is the one place in all of TCP, except connection 1019 * reset, where we can be unlinking the send_head. 1020 */ 1021 tcp_check_send_head(sk, skb); 1022 sk_wmem_free_skb(sk, skb); 1023 } 1024 1025do_error: 1026 if (copied) 1027 goto out; 1028out_err: 1029 err = sk_stream_error(sk, flags, err); 1030 TCP_CHECK_TIMER(sk); 1031 release_sock(sk); 1032 return err; 1033} 1034 1035/* 1036 * Handle reading urgent data. BSD has very simple semantics for 1037 * this, no blocking and very strange errors 8) 1038 */ 1039 1040static int tcp_recv_urg(struct sock *sk, long timeo, 1041 struct msghdr *msg, int len, int flags, 1042 int *addr_len) 1043{ 1044 struct tcp_sock *tp = tcp_sk(sk); 1045 1046 /* No URG data to read. */ 1047 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1048 tp->urg_data == TCP_URG_READ) 1049 return -EINVAL; /* Yes this is right ! */ 1050 1051 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1052 return -ENOTCONN; 1053 1054 if (tp->urg_data & TCP_URG_VALID) { 1055 int err = 0; 1056 char c = tp->urg_data; 1057 1058 if (!(flags & MSG_PEEK)) 1059 tp->urg_data = TCP_URG_READ; 1060 1061 /* Read urgent data. */ 1062 msg->msg_flags |= MSG_OOB; 1063 1064 if (len > 0) { 1065 if (!(flags & MSG_TRUNC)) 1066 err = memcpy_toiovec(msg->msg_iov, &c, 1); 1067 len = 1; 1068 } else 1069 msg->msg_flags |= MSG_TRUNC; 1070 1071 return err ? -EFAULT : len; 1072 } 1073 1074 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1075 return 0; 1076 1077 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1078 * the available implementations agree in this case: 1079 * this call should never block, independent of the 1080 * blocking state of the socket. 1081 * Mike <pall@rz.uni-karlsruhe.de> 1082 */ 1083 return -EAGAIN; 1084} 1085 1086/* Clean up the receive buffer for full frames taken by the user, 1087 * then send an ACK if necessary. 
COPIED is the number of bytes 1088 * tcp_recvmsg has given to the user so far, it speeds up the 1089 * calculation of whether or not we must ACK for the sake of 1090 * a window update. 1091 */ 1092void tcp_cleanup_rbuf(struct sock *sk, int copied) 1093{ 1094 struct tcp_sock *tp = tcp_sk(sk); 1095 int time_to_ack = 0; 1096 1097#if TCP_DEBUG 1098 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1099 1100 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1101#endif 1102 1103 if (inet_csk_ack_scheduled(sk)) { 1104 const struct inet_connection_sock *icsk = inet_csk(sk); 1105 /* Delayed ACKs frequently hit locked sockets during bulk 1106 * receive. */ 1107 if (icsk->icsk_ack.blocked || 1108 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 1109 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1110 /* 1111 * If this read emptied read buffer, we send ACK, if 1112 * connection is not bidirectional, user drained 1113 * receive buffer and there was a small segment 1114 * in queue. 1115 */ 1116 (copied > 0 && 1117 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1118 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1119 !icsk->icsk_ack.pingpong)) && 1120 !atomic_read(&sk->sk_rmem_alloc))) 1121 time_to_ack = 1; 1122 } 1123 1124 /* We send an ACK if we can now advertise a non-zero window 1125 * which has been raised "significantly". 1126 * 1127 * Even if window raised up to infinity, do not send window open ACK 1128 * in states, where we will not receive more. It is useless. 1129 */ 1130 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1131 __u32 rcv_window_now = tcp_receive_window(tp); 1132 1133 /* Optimize, __tcp_select_window() is not cheap. */ 1134 if (2*rcv_window_now <= tp->window_clamp) { 1135 __u32 new_window = __tcp_select_window(sk); 1136 1137 /* Send ACK now, if this read freed lots of space 1138 * in our buffer. Certainly, new_window is new window. 1139 * We can advertise it now, if it is not less than current one. 1140 * "Lots" means "at least twice" here. 1141 */ 1142 if (new_window && new_window >= 2 * rcv_window_now) 1143 time_to_ack = 1; 1144 } 1145 } 1146 if (time_to_ack) 1147 tcp_send_ack(sk); 1148} 1149 1150static void tcp_prequeue_process(struct sock *sk) 1151{ 1152 struct sk_buff *skb; 1153 struct tcp_sock *tp = tcp_sk(sk); 1154 1155 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); 1156 1157 /* RX process wants to run with disabled BHs, though it is not 1158 * necessary */ 1159 local_bh_disable(); 1160 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 1161 sk->sk_backlog_rcv(sk, skb); 1162 local_bh_enable(); 1163 1164 /* Clear memory counter. */ 1165 tp->ucopy.memory = 0; 1166} 1167 1168static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1169{ 1170 struct sk_buff *skb; 1171 u32 offset; 1172 1173 skb_queue_walk(&sk->sk_receive_queue, skb) { 1174 offset = seq - TCP_SKB_CB(skb)->seq; 1175 if (tcp_hdr(skb)->syn) 1176 offset--; 1177 if (offset < skb->len || tcp_hdr(skb)->fin) { 1178 *off = offset; 1179 return skb; 1180 } 1181 } 1182 return NULL; 1183} 1184 1185/* 1186 * This routine provides an alternative to tcp_recvmsg() for routines 1187 * that would like to handle copying from skbuffs directly in 'sendfile' 1188 * fashion. 1189 * Note: 1190 * - It is assumed that the socket was locked by the caller. 1191 * - The routine does not block. 
1192 * - At present, there is no support for reading OOB data 1193 * or for 'peeking' the socket using this routine 1194 * (although both would be easy to implement). 1195 */ 1196int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1197 sk_read_actor_t recv_actor) 1198{ 1199 struct sk_buff *skb; 1200 struct tcp_sock *tp = tcp_sk(sk); 1201 u32 seq = tp->copied_seq; 1202 u32 offset; 1203 int copied = 0; 1204 1205 if (sk->sk_state == TCP_LISTEN) 1206 return -ENOTCONN; 1207 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1208 if (offset < skb->len) { 1209 size_t used, len; 1210 1211 len = skb->len - offset; 1212 /* Stop reading if we hit a patch of urgent data */ 1213 if (tp->urg_data) { 1214 u32 urg_offset = tp->urg_seq - seq; 1215 if (urg_offset < len) 1216 len = urg_offset; 1217 if (!len) 1218 break; 1219 } 1220 used = recv_actor(desc, skb, offset, len); 1221 if (used < 0) { 1222 if (!copied) 1223 copied = used; 1224 break; 1225 } else if (used <= len) { 1226 seq += used; 1227 copied += used; 1228 offset += used; 1229 } 1230 /* 1231 * If recv_actor drops the lock (e.g. TCP splice 1232 * receive) the skb pointer might be invalid when 1233 * getting here: tcp_collapse might have deleted it 1234 * while aggregating skbs from the socket queue. 1235 */ 1236 skb = tcp_recv_skb(sk, seq-1, &offset); 1237 if (!skb || (offset+1 != skb->len)) 1238 break; 1239 } 1240 if (tcp_hdr(skb)->fin) { 1241 sk_eat_skb(sk, skb, 0); 1242 ++seq; 1243 break; 1244 } 1245 sk_eat_skb(sk, skb, 0); 1246 if (!desc->count) 1247 break; 1248 } 1249 tp->copied_seq = seq; 1250 1251 tcp_rcv_space_adjust(sk); 1252 1253 /* Clean up data we have read: This will do ACK frames. */ 1254 if (copied > 0) 1255 tcp_cleanup_rbuf(sk, copied); 1256 return copied; 1257} 1258 1259/* 1260 * This routine copies from a sock struct into the user buffer. 1261 * 1262 * Technical note: in 2.3 we work on _locked_ socket, so that 1263 * tricks with *seq access order and skb->users are not required. 1264 * Probably, code can be easily improved even more. 1265 */ 1266 1267int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1268 size_t len, int nonblock, int flags, int *addr_len) 1269{ 1270 struct tcp_sock *tp = tcp_sk(sk); 1271 int copied = 0; 1272 u32 peek_seq; 1273 u32 *seq; 1274 unsigned long used; 1275 int err; 1276 int target; /* Read at least this many bytes */ 1277 long timeo; 1278 struct task_struct *user_recv = NULL; 1279 int copied_early = 0; 1280 struct sk_buff *skb; 1281 1282 lock_sock(sk); 1283 1284 TCP_CHECK_TIMER(sk); 1285 1286 err = -ENOTCONN; 1287 if (sk->sk_state == TCP_LISTEN) 1288 goto out; 1289 1290 timeo = sock_rcvtimeo(sk, nonblock); 1291 1292 /* Urgent data needs to be handled specially. 
*/ 1293 if (flags & MSG_OOB) 1294 goto recv_urg; 1295 1296 seq = &tp->copied_seq; 1297 if (flags & MSG_PEEK) { 1298 peek_seq = tp->copied_seq; 1299 seq = &peek_seq; 1300 } 1301 1302 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1303 1304#ifdef CONFIG_NET_DMA 1305 tp->ucopy.dma_chan = NULL; 1306 preempt_disable(); 1307 skb = skb_peek_tail(&sk->sk_receive_queue); 1308 { 1309 int available = 0; 1310 1311 if (skb) 1312 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); 1313 if ((available < target) && 1314 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1315 !sysctl_tcp_low_latency && 1316 __get_cpu_var(softnet_data).net_dma) { 1317 preempt_enable_no_resched(); 1318 tp->ucopy.pinned_list = 1319 dma_pin_iovec_pages(msg->msg_iov, len); 1320 } else { 1321 preempt_enable_no_resched(); 1322 } 1323 } 1324#endif 1325 1326 do { 1327 u32 offset; 1328 1329 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 1330 if (tp->urg_data && tp->urg_seq == *seq) { 1331 if (copied) 1332 break; 1333 if (signal_pending(current)) { 1334 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 1335 break; 1336 } 1337 } 1338 1339 /* Next get a buffer. */ 1340 1341 skb = skb_peek(&sk->sk_receive_queue); 1342 do { 1343 if (!skb) 1344 break; 1345 1346 /* Now that we have two receive queues this 1347 * shouldn't happen. 1348 */ 1349 if (before(*seq, TCP_SKB_CB(skb)->seq)) { 1350 printk(KERN_INFO "recvmsg bug: copied %X " 1351 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); 1352 break; 1353 } 1354 offset = *seq - TCP_SKB_CB(skb)->seq; 1355 if (tcp_hdr(skb)->syn) 1356 offset--; 1357 if (offset < skb->len) 1358 goto found_ok_skb; 1359 if (tcp_hdr(skb)->fin) 1360 goto found_fin_ok; 1361 BUG_TRAP(flags & MSG_PEEK); 1362 skb = skb->next; 1363 } while (skb != (struct sk_buff *)&sk->sk_receive_queue); 1364 1365 /* Well, if we have backlog, try to process it now yet. */ 1366 1367 if (copied >= target && !sk->sk_backlog.tail) 1368 break; 1369 1370 if (copied) { 1371 if (sk->sk_err || 1372 sk->sk_state == TCP_CLOSE || 1373 (sk->sk_shutdown & RCV_SHUTDOWN) || 1374 !timeo || 1375 signal_pending(current) || 1376 (flags & MSG_PEEK)) 1377 break; 1378 } else { 1379 if (sock_flag(sk, SOCK_DONE)) 1380 break; 1381 1382 if (sk->sk_err) { 1383 copied = sock_error(sk); 1384 break; 1385 } 1386 1387 if (sk->sk_shutdown & RCV_SHUTDOWN) 1388 break; 1389 1390 if (sk->sk_state == TCP_CLOSE) { 1391 if (!sock_flag(sk, SOCK_DONE)) { 1392 /* This occurs when user tries to read 1393 * from never connected socket. 1394 */ 1395 copied = -ENOTCONN; 1396 break; 1397 } 1398 break; 1399 } 1400 1401 if (!timeo) { 1402 copied = -EAGAIN; 1403 break; 1404 } 1405 1406 if (signal_pending(current)) { 1407 copied = sock_intr_errno(timeo); 1408 break; 1409 } 1410 } 1411 1412 tcp_cleanup_rbuf(sk, copied); 1413 1414 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { 1415 /* Install new reader */ 1416 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { 1417 user_recv = current; 1418 tp->ucopy.task = user_recv; 1419 tp->ucopy.iov = msg->msg_iov; 1420 } 1421 1422 tp->ucopy.len = len; 1423 1424 BUG_TRAP(tp->copied_seq == tp->rcv_nxt || 1425 (flags & (MSG_PEEK | MSG_TRUNC))); 1426 1427 /* Ugly... If prequeue is not empty, we have to 1428 * process it before releasing socket, otherwise 1429 * order will be broken at second iteration. 1430 * More elegant solution is required!!! 1431 * 1432 * Look: we have the following (pseudo)queues: 1433 * 1434 * 1. packets in flight 1435 * 2. backlog 1436 * 3. prequeue 1437 * 4. 
receive_queue 1438 * 1439 * Each queue can be processed only if the next ones 1440 * are empty. At this point we have empty receive_queue. 1441 * But prequeue _can_ be not empty after 2nd iteration, 1442 * when we jumped to start of loop because backlog 1443 * processing added something to receive_queue. 1444 * We cannot release_sock(), because backlog contains 1445 * packets arrived _after_ prequeued ones. 1446 * 1447 * Shortly, algorithm is clear --- to process all 1448 * the queues in order. We could make it more directly, 1449 * requeueing packets from backlog to prequeue, if 1450 * is not empty. It is more elegant, but eats cycles, 1451 * unfortunately. 1452 */ 1453 if (!skb_queue_empty(&tp->ucopy.prequeue)) 1454 goto do_prequeue; 1455 1456 /* __ Set realtime policy in scheduler __ */ 1457 } 1458 1459 if (copied >= target) { 1460 /* Do not sleep, just process backlog. */ 1461 release_sock(sk); 1462 lock_sock(sk); 1463 } else 1464 sk_wait_data(sk, &timeo); 1465 1466#ifdef CONFIG_NET_DMA 1467 tp->ucopy.wakeup = 0; 1468#endif 1469 1470 if (user_recv) { 1471 int chunk; 1472 1473 /* __ Restore normal policy in scheduler __ */ 1474 1475 if ((chunk = len - tp->ucopy.len) != 0) { 1476 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); 1477 len -= chunk; 1478 copied += chunk; 1479 } 1480 1481 if (tp->rcv_nxt == tp->copied_seq && 1482 !skb_queue_empty(&tp->ucopy.prequeue)) { 1483do_prequeue: 1484 tcp_prequeue_process(sk); 1485 1486 if ((chunk = len - tp->ucopy.len) != 0) { 1487 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1488 len -= chunk; 1489 copied += chunk; 1490 } 1491 } 1492 } 1493 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { 1494 if (net_ratelimit()) 1495 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", 1496 current->comm, task_pid_nr(current)); 1497 peek_seq = tp->copied_seq; 1498 } 1499 continue; 1500 1501 found_ok_skb: 1502 /* Ok so how much can we use? */ 1503 used = skb->len - offset; 1504 if (len < used) 1505 used = len; 1506 1507 /* Do we have urgent data here? */ 1508 if (tp->urg_data) { 1509 u32 urg_offset = tp->urg_seq - *seq; 1510 if (urg_offset < used) { 1511 if (!urg_offset) { 1512 if (!sock_flag(sk, SOCK_URGINLINE)) { 1513 ++*seq; 1514 offset++; 1515 used--; 1516 if (!used) 1517 goto skip_copy; 1518 } 1519 } else 1520 used = urg_offset; 1521 } 1522 } 1523 1524 if (!(flags & MSG_TRUNC)) { 1525#ifdef CONFIG_NET_DMA 1526 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1527 tp->ucopy.dma_chan = get_softnet_dma(); 1528 1529 if (tp->ucopy.dma_chan) { 1530 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1531 tp->ucopy.dma_chan, skb, offset, 1532 msg->msg_iov, used, 1533 tp->ucopy.pinned_list); 1534 1535 if (tp->ucopy.dma_cookie < 0) { 1536 1537 printk(KERN_ALERT "dma_cookie < 0\n"); 1538 1539 /* Exception. Bailout! */ 1540 if (!copied) 1541 copied = -EFAULT; 1542 break; 1543 } 1544 if ((offset + used) == skb->len) 1545 copied_early = 1; 1546 1547 } else 1548#endif 1549 { 1550 err = skb_copy_datagram_iovec(skb, offset, 1551 msg->msg_iov, used); 1552 if (err) { 1553 /* Exception. Bailout! 
*/ 1554 if (!copied) 1555 copied = -EFAULT; 1556 break; 1557 } 1558 } 1559 } 1560 1561 *seq += used; 1562 copied += used; 1563 len -= used; 1564 1565 tcp_rcv_space_adjust(sk); 1566 1567skip_copy: 1568 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { 1569 tp->urg_data = 0; 1570 tcp_fast_path_check(sk); 1571 } 1572 if (used + offset < skb->len) 1573 continue; 1574 1575 if (tcp_hdr(skb)->fin) 1576 goto found_fin_ok; 1577 if (!(flags & MSG_PEEK)) { 1578 sk_eat_skb(sk, skb, copied_early); 1579 copied_early = 0; 1580 } 1581 continue; 1582 1583 found_fin_ok: 1584 /* Process the FIN. */ 1585 ++*seq; 1586 if (!(flags & MSG_PEEK)) { 1587 sk_eat_skb(sk, skb, copied_early); 1588 copied_early = 0; 1589 } 1590 break; 1591 } while (len > 0); 1592 1593 if (user_recv) { 1594 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1595 int chunk; 1596 1597 tp->ucopy.len = copied > 0 ? len : 0; 1598 1599 tcp_prequeue_process(sk); 1600 1601 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { 1602 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1603 len -= chunk; 1604 copied += chunk; 1605 } 1606 } 1607 1608 tp->ucopy.task = NULL; 1609 tp->ucopy.len = 0; 1610 } 1611 1612#ifdef CONFIG_NET_DMA 1613 if (tp->ucopy.dma_chan) { 1614 dma_cookie_t done, used; 1615 1616 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); 1617 1618 while (dma_async_memcpy_complete(tp->ucopy.dma_chan, 1619 tp->ucopy.dma_cookie, &done, 1620 &used) == DMA_IN_PROGRESS) { 1621 /* do partial cleanup of sk_async_wait_queue */ 1622 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1623 (dma_async_is_complete(skb->dma_cookie, done, 1624 used) == DMA_SUCCESS)) { 1625 __skb_dequeue(&sk->sk_async_wait_queue); 1626 kfree_skb(skb); 1627 } 1628 } 1629 1630 /* Safe to free early-copied skbs now */ 1631 __skb_queue_purge(&sk->sk_async_wait_queue); 1632 dma_chan_put(tp->ucopy.dma_chan); 1633 tp->ucopy.dma_chan = NULL; 1634 } 1635 if (tp->ucopy.pinned_list) { 1636 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1637 tp->ucopy.pinned_list = NULL; 1638 } 1639#endif 1640 1641 /* According to UNIX98, msg_name/msg_namelen are ignored 1642 * on connected socket. I was just happy when found this 8) --ANK 1643 */ 1644 1645 /* Clean up data we have read: This will do ACK frames. */ 1646 tcp_cleanup_rbuf(sk, copied); 1647 1648 TCP_CHECK_TIMER(sk); 1649 release_sock(sk); 1650 return copied; 1651 1652out: 1653 TCP_CHECK_TIMER(sk); 1654 release_sock(sk); 1655 return err; 1656 1657recv_urg: 1658 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len); 1659 goto out; 1660} 1661 1662void tcp_set_state(struct sock *sk, int state) 1663{ 1664 int oldstate = sk->sk_state; 1665 1666 switch (state) { 1667 case TCP_ESTABLISHED: 1668 if (oldstate != TCP_ESTABLISHED) 1669 TCP_INC_STATS(TCP_MIB_CURRESTAB); 1670 break; 1671 1672 case TCP_CLOSE: 1673 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 1674 TCP_INC_STATS(TCP_MIB_ESTABRESETS); 1675 1676 sk->sk_prot->unhash(sk); 1677 if (inet_csk(sk)->icsk_bind_hash && 1678 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1679 inet_put_port(sk); 1680 /* fall through */ 1681 default: 1682 if (oldstate==TCP_ESTABLISHED) 1683 TCP_DEC_STATS(TCP_MIB_CURRESTAB); 1684 } 1685 1686 /* Change state AFTER socket is unhashed to avoid closed 1687 * socket sitting in hash tables. 
1688 */ 1689 sk->sk_state = state; 1690 1691#ifdef STATE_TRACE 1692 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); 1693#endif 1694} 1695EXPORT_SYMBOL_GPL(tcp_set_state); 1696 1697/* 1698 * State processing on a close. This implements the state shift for 1699 * sending our FIN frame. Note that we only send a FIN for some 1700 * states. A shutdown() may have already sent the FIN, or we may be 1701 * closed. 1702 */ 1703 1704static const unsigned char new_state[16] = { 1705 /* current state: new state: action: */ 1706 /* (Invalid) */ TCP_CLOSE, 1707 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 1708 /* TCP_SYN_SENT */ TCP_CLOSE, 1709 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 1710 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, 1711 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, 1712 /* TCP_TIME_WAIT */ TCP_CLOSE, 1713 /* TCP_CLOSE */ TCP_CLOSE, 1714 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, 1715 /* TCP_LAST_ACK */ TCP_LAST_ACK, 1716 /* TCP_LISTEN */ TCP_CLOSE, 1717 /* TCP_CLOSING */ TCP_CLOSING, 1718}; 1719 1720static int tcp_close_state(struct sock *sk) 1721{ 1722 int next = (int)new_state[sk->sk_state]; 1723 int ns = next & TCP_STATE_MASK; 1724 1725 tcp_set_state(sk, ns); 1726 1727 return next & TCP_ACTION_FIN; 1728} 1729 1730/* 1731 * Shutdown the sending side of a connection. Much like close except 1732 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 1733 */ 1734 1735void tcp_shutdown(struct sock *sk, int how) 1736{ 1737 /* We need to grab some memory, and put together a FIN, 1738 * and then put it into the queue to be sent. 1739 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 1740 */ 1741 if (!(how & SEND_SHUTDOWN)) 1742 return; 1743 1744 /* If we've already sent a FIN, or it's a closed state, skip this. */ 1745 if ((1 << sk->sk_state) & 1746 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 1747 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 1748 /* Clear out any half completed packets. FIN if needed. */ 1749 if (tcp_close_state(sk)) 1750 tcp_send_fin(sk); 1751 } 1752} 1753 1754void tcp_close(struct sock *sk, long timeout) 1755{ 1756 struct sk_buff *skb; 1757 int data_was_unread = 0; 1758 int state; 1759 1760 lock_sock(sk); 1761 sk->sk_shutdown = SHUTDOWN_MASK; 1762 1763 if (sk->sk_state == TCP_LISTEN) { 1764 tcp_set_state(sk, TCP_CLOSE); 1765 1766 /* Special case. */ 1767 inet_csk_listen_stop(sk); 1768 1769 goto adjudge_to_death; 1770 } 1771 1772 /* We need to flush the recv. buffs. We do this only on the 1773 * descriptor close, not protocol-sourced closes, because the 1774 * reader process may not have drained the data yet! 1775 */ 1776 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 1777 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - 1778 tcp_hdr(skb)->fin; 1779 data_was_unread += len; 1780 __kfree_skb(skb); 1781 } 1782 1783 sk_mem_reclaim(sk); 1784 1785 /* As outlined in RFC 2525, section 2.17, we send a RST here because 1786 * data was lost. To witness the awful effects of the old behavior of 1787 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 1788 * GET in an FTP client, suspend the process, wait for the client to 1789 * advertise a zero window, then kill -9 the FTP client, wheee... 1790 * Note: timeout is always zero in such a case. 1791 */ 1792 if (data_was_unread) { 1793 /* Unread data was tossed, zap the connection. 
*/ 1794 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE); 1795 tcp_set_state(sk, TCP_CLOSE); 1796 tcp_send_active_reset(sk, GFP_KERNEL); 1797 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 1798 /* Check zero linger _after_ checking for unread data. */ 1799 sk->sk_prot->disconnect(sk, 0); 1800 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA); 1801 } else if (tcp_close_state(sk)) { 1802 /* We FIN if the application ate all the data before 1803 * zapping the connection. 1804 */ 1805 1806 /* RED-PEN. Formally speaking, we have broken TCP state 1807 * machine. State transitions: 1808 * 1809 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 1810 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 1811 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 1812 * 1813 * are legal only when FIN has been sent (i.e. in window), 1814 * rather than queued out of window. Purists blame. 1815 * 1816 * F.e. "RFC state" is ESTABLISHED, 1817 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 1818 * 1819 * The visible declinations are that sometimes 1820 * we enter time-wait state, when it is not required really 1821 * (harmless), do not send active resets, when they are 1822 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 1823 * they look as CLOSING or LAST_ACK for Linux) 1824 * Probably, I missed some more holelets. 1825 * --ANK 1826 */ 1827 tcp_send_fin(sk); 1828 } 1829 1830 sk_stream_wait_close(sk, timeout); 1831 1832adjudge_to_death: 1833 state = sk->sk_state; 1834 sock_hold(sk); 1835 sock_orphan(sk); 1836 atomic_inc(sk->sk_prot->orphan_count); 1837 1838 /* It is the last release_sock in its life. It will remove backlog. */ 1839 release_sock(sk); 1840 1841 1842 /* Now socket is owned by kernel and we acquire BH lock 1843 to finish close. No need to check for user refs. 1844 */ 1845 local_bh_disable(); 1846 bh_lock_sock(sk); 1847 BUG_TRAP(!sock_owned_by_user(sk)); 1848 1849 /* Have we already been destroyed by a softirq or backlog? */ 1850 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 1851 goto out; 1852 1853 /* This is a (useful) BSD violating of the RFC. There is a 1854 * problem with TCP as specified in that the other end could 1855 * keep a socket open forever with no application left this end. 1856 * We use a 3 minute timeout (about the same as BSD) then kill 1857 * our end. If they send after that then tough - BUT: long enough 1858 * that we won't make the old 4*rto = almost no time - whoops 1859 * reset mistake. 1860 * 1861 * Nope, it was not mistake. It is really desired behaviour 1862 * f.e. on http servers, when such sockets are useless, but 1863 * consume significant resources. Let's do it with special 1864 * linger2 option. 
--ANK 1865 */ 1866 1867 if (sk->sk_state == TCP_FIN_WAIT2) { 1868 struct tcp_sock *tp = tcp_sk(sk); 1869 if (tp->linger2 < 0) { 1870 tcp_set_state(sk, TCP_CLOSE); 1871 tcp_send_active_reset(sk, GFP_ATOMIC); 1872 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); 1873 } else { 1874 const int tmo = tcp_fin_time(sk); 1875 1876 if (tmo > TCP_TIMEWAIT_LEN) { 1877 inet_csk_reset_keepalive_timer(sk, 1878 tmo - TCP_TIMEWAIT_LEN); 1879 } else { 1880 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1881 goto out; 1882 } 1883 } 1884 } 1885 if (sk->sk_state != TCP_CLOSE) { 1886 sk_mem_reclaim(sk); 1887 if (tcp_too_many_orphans(sk, 1888 atomic_read(sk->sk_prot->orphan_count))) { 1889 if (net_ratelimit()) 1890 printk(KERN_INFO "TCP: too many of orphaned " 1891 "sockets\n"); 1892 tcp_set_state(sk, TCP_CLOSE); 1893 tcp_send_active_reset(sk, GFP_ATOMIC); 1894 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 1895 } 1896 } 1897 1898 if (sk->sk_state == TCP_CLOSE) 1899 inet_csk_destroy_sock(sk); 1900 /* Otherwise, socket is reprieved until protocol close. */ 1901 1902out: 1903 bh_unlock_sock(sk); 1904 local_bh_enable(); 1905 sock_put(sk); 1906} 1907 1908/* These states need RST on ABORT according to RFC793 */ 1909 1910static inline int tcp_need_reset(int state) 1911{ 1912 return (1 << state) & 1913 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 1914 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 1915} 1916 1917int tcp_disconnect(struct sock *sk, int flags) 1918{ 1919 struct inet_sock *inet = inet_sk(sk); 1920 struct inet_connection_sock *icsk = inet_csk(sk); 1921 struct tcp_sock *tp = tcp_sk(sk); 1922 int err = 0; 1923 int old_state = sk->sk_state; 1924 1925 if (old_state != TCP_CLOSE) 1926 tcp_set_state(sk, TCP_CLOSE); 1927 1928 /* ABORT function of RFC793 */ 1929 if (old_state == TCP_LISTEN) { 1930 inet_csk_listen_stop(sk); 1931 } else if (tcp_need_reset(old_state) || 1932 (tp->snd_nxt != tp->write_seq && 1933 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 1934 /* The last check adjusts for discrepancy of Linux wrt. RFC 1935 * states 1936 */ 1937 tcp_send_active_reset(sk, gfp_any()); 1938 sk->sk_err = ECONNRESET; 1939 } else if (old_state == TCP_SYN_SENT) 1940 sk->sk_err = ECONNRESET; 1941 1942 tcp_clear_xmit_timers(sk); 1943 __skb_queue_purge(&sk->sk_receive_queue); 1944 tcp_write_queue_purge(sk); 1945 __skb_queue_purge(&tp->out_of_order_queue); 1946#ifdef CONFIG_NET_DMA 1947 __skb_queue_purge(&sk->sk_async_wait_queue); 1948#endif 1949 1950 inet->dport = 0; 1951 1952 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1953 inet_reset_saddr(sk); 1954 1955 sk->sk_shutdown = 0; 1956 sock_reset_flag(sk, SOCK_DONE); 1957 tp->srtt = 0; 1958 if ((tp->write_seq += tp->max_window + 2) == 0) 1959 tp->write_seq = 1; 1960 icsk->icsk_backoff = 0; 1961 tp->snd_cwnd = 2; 1962 icsk->icsk_probes_out = 0; 1963 tp->packets_out = 0; 1964 tp->snd_ssthresh = 0x7fffffff; 1965 tp->snd_cwnd_cnt = 0; 1966 tp->bytes_acked = 0; 1967 tcp_set_ca_state(sk, TCP_CA_Open); 1968 tcp_clear_retrans(tp); 1969 inet_csk_delack_init(sk); 1970 tcp_init_send_head(sk); 1971 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 1972 __sk_dst_reset(sk); 1973 1974 BUG_TRAP(!inet->num || icsk->icsk_bind_hash); 1975 1976 sk->sk_error_report(sk); 1977 return err; 1978} 1979 1980/* 1981 * Socket option code for TCP. 
1982 */ 1983static int do_tcp_setsockopt(struct sock *sk, int level, 1984 int optname, char __user *optval, int optlen) 1985{ 1986 struct tcp_sock *tp = tcp_sk(sk); 1987 struct inet_connection_sock *icsk = inet_csk(sk); 1988 int val; 1989 int err = 0; 1990 1991 /* This is a string value all the others are int's */ 1992 if (optname == TCP_CONGESTION) { 1993 char name[TCP_CA_NAME_MAX]; 1994 1995 if (optlen < 1) 1996 return -EINVAL; 1997 1998 val = strncpy_from_user(name, optval, 1999 min(TCP_CA_NAME_MAX-1, optlen)); 2000 if (val < 0) 2001 return -EFAULT; 2002 name[val] = 0; 2003 2004 lock_sock(sk); 2005 err = tcp_set_congestion_control(sk, name); 2006 release_sock(sk); 2007 return err; 2008 } 2009 2010 if (optlen < sizeof(int)) 2011 return -EINVAL; 2012 2013 if (get_user(val, (int __user *)optval)) 2014 return -EFAULT; 2015 2016 lock_sock(sk); 2017 2018 switch (optname) { 2019 case TCP_MAXSEG: 2020 /* Values greater than interface MTU won't take effect. However 2021 * at the point when this call is done we typically don't yet 2022 * know which interface is going to be used */ 2023 if (val < 8 || val > MAX_TCP_WINDOW) { 2024 err = -EINVAL; 2025 break; 2026 } 2027 tp->rx_opt.user_mss = val; 2028 break; 2029 2030 case TCP_NODELAY: 2031 if (val) { 2032 /* TCP_NODELAY is weaker than TCP_CORK, so that 2033 * this option on corked socket is remembered, but 2034 * it is not activated until cork is cleared. 2035 * 2036 * However, when TCP_NODELAY is set we make 2037 * an explicit push, which overrides even TCP_CORK 2038 * for currently queued segments. 2039 */ 2040 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 2041 tcp_push_pending_frames(sk); 2042 } else { 2043 tp->nonagle &= ~TCP_NAGLE_OFF; 2044 } 2045 break; 2046 2047 case TCP_CORK: 2048 /* When set indicates to always queue non-full frames. 2049 * Later the user clears this option and we transmit 2050 * any pending partial frames in the queue. This is 2051 * meant to be used alongside sendfile() to get properly 2052 * filled frames when the user (for example) must write 2053 * out headers with a write() call first and then use 2054 * sendfile to send out the data parts. 2055 * 2056 * TCP_CORK can be set together with TCP_NODELAY and it is 2057 * stronger than TCP_NODELAY. 
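 *
 * (Editor's illustration of the pattern described above, with
 * hypothetical descriptors "sock" and "filefd":
 *
 *	int on = 1, off = 0;
 *	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(sock, headers, header_len);
 *	sendfile(sock, filefd, NULL, file_len);
 *	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *
 * Clearing the option ends up in tcp_push_pending_frames() below,
 * which flushes any partial frame still sitting in the queue.)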
2058 */ 2059 if (val) { 2060 tp->nonagle |= TCP_NAGLE_CORK; 2061 } else { 2062 tp->nonagle &= ~TCP_NAGLE_CORK; 2063 if (tp->nonagle&TCP_NAGLE_OFF) 2064 tp->nonagle |= TCP_NAGLE_PUSH; 2065 tcp_push_pending_frames(sk); 2066 } 2067 break; 2068 2069 case TCP_KEEPIDLE: 2070 if (val < 1 || val > MAX_TCP_KEEPIDLE) 2071 err = -EINVAL; 2072 else { 2073 tp->keepalive_time = val * HZ; 2074 if (sock_flag(sk, SOCK_KEEPOPEN) && 2075 !((1 << sk->sk_state) & 2076 (TCPF_CLOSE | TCPF_LISTEN))) { 2077 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; 2078 if (tp->keepalive_time > elapsed) 2079 elapsed = tp->keepalive_time - elapsed; 2080 else 2081 elapsed = 0; 2082 inet_csk_reset_keepalive_timer(sk, elapsed); 2083 } 2084 } 2085 break; 2086 case TCP_KEEPINTVL: 2087 if (val < 1 || val > MAX_TCP_KEEPINTVL) 2088 err = -EINVAL; 2089 else 2090 tp->keepalive_intvl = val * HZ; 2091 break; 2092 case TCP_KEEPCNT: 2093 if (val < 1 || val > MAX_TCP_KEEPCNT) 2094 err = -EINVAL; 2095 else 2096 tp->keepalive_probes = val; 2097 break; 2098 case TCP_SYNCNT: 2099 if (val < 1 || val > MAX_TCP_SYNCNT) 2100 err = -EINVAL; 2101 else 2102 icsk->icsk_syn_retries = val; 2103 break; 2104 2105 case TCP_LINGER2: 2106 if (val < 0) 2107 tp->linger2 = -1; 2108 else if (val > sysctl_tcp_fin_timeout / HZ) 2109 tp->linger2 = 0; 2110 else 2111 tp->linger2 = val * HZ; 2112 break; 2113 2114 case TCP_DEFER_ACCEPT: 2115 icsk->icsk_accept_queue.rskq_defer_accept = 0; 2116 if (val > 0) { 2117 /* Translate value in seconds to number of 2118 * retransmits */ 2119 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && 2120 val > ((TCP_TIMEOUT_INIT / HZ) << 2121 icsk->icsk_accept_queue.rskq_defer_accept)) 2122 icsk->icsk_accept_queue.rskq_defer_accept++; 2123 icsk->icsk_accept_queue.rskq_defer_accept++; 2124 } 2125 break; 2126 2127 case TCP_WINDOW_CLAMP: 2128 if (!val) { 2129 if (sk->sk_state != TCP_CLOSE) { 2130 err = -EINVAL; 2131 break; 2132 } 2133 tp->window_clamp = 0; 2134 } else 2135 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 
2136 SOCK_MIN_RCVBUF / 2 : val; 2137 break; 2138 2139 case TCP_QUICKACK: 2140 if (!val) { 2141 icsk->icsk_ack.pingpong = 1; 2142 } else { 2143 icsk->icsk_ack.pingpong = 0; 2144 if ((1 << sk->sk_state) & 2145 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 2146 inet_csk_ack_scheduled(sk)) { 2147 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 2148 tcp_cleanup_rbuf(sk, 1); 2149 if (!(val & 1)) 2150 icsk->icsk_ack.pingpong = 1; 2151 } 2152 } 2153 break; 2154 2155#ifdef CONFIG_TCP_MD5SIG 2156 case TCP_MD5SIG: 2157 /* Read the IP->Key mappings from userspace */ 2158 err = tp->af_specific->md5_parse(sk, optval, optlen); 2159 break; 2160#endif 2161 2162 default: 2163 err = -ENOPROTOOPT; 2164 break; 2165 } 2166 2167 release_sock(sk); 2168 return err; 2169} 2170 2171int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2172 int optlen) 2173{ 2174 struct inet_connection_sock *icsk = inet_csk(sk); 2175 2176 if (level != SOL_TCP) 2177 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 2178 optval, optlen); 2179 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2180} 2181 2182#ifdef CONFIG_COMPAT 2183int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2184 char __user *optval, int optlen) 2185{ 2186 if (level != SOL_TCP) 2187 return inet_csk_compat_setsockopt(sk, level, optname, 2188 optval, optlen); 2189 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2190} 2191 2192EXPORT_SYMBOL(compat_tcp_setsockopt); 2193#endif 2194 2195/* Return information about state of tcp endpoint in API format. */ 2196void tcp_get_info(struct sock *sk, struct tcp_info *info) 2197{ 2198 struct tcp_sock *tp = tcp_sk(sk); 2199 const struct inet_connection_sock *icsk = inet_csk(sk); 2200 u32 now = tcp_time_stamp; 2201 2202 memset(info, 0, sizeof(*info)); 2203 2204 info->tcpi_state = sk->sk_state; 2205 info->tcpi_ca_state = icsk->icsk_ca_state; 2206 info->tcpi_retransmits = icsk->icsk_retransmits; 2207 info->tcpi_probes = icsk->icsk_probes_out; 2208 info->tcpi_backoff = icsk->icsk_backoff; 2209 2210 if (tp->rx_opt.tstamp_ok) 2211 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 2212 if (tcp_is_sack(tp)) 2213 info->tcpi_options |= TCPI_OPT_SACK; 2214 if (tp->rx_opt.wscale_ok) { 2215 info->tcpi_options |= TCPI_OPT_WSCALE; 2216 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 2217 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2218 } 2219 2220 if (tp->ecn_flags&TCP_ECN_OK) 2221 info->tcpi_options |= TCPI_OPT_ECN; 2222 2223 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2224 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2225 info->tcpi_snd_mss = tp->mss_cache; 2226 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2227 2228 if (sk->sk_state == TCP_LISTEN) { 2229 info->tcpi_unacked = sk->sk_ack_backlog; 2230 info->tcpi_sacked = sk->sk_max_ack_backlog; 2231 } else { 2232 info->tcpi_unacked = tp->packets_out; 2233 info->tcpi_sacked = tp->sacked_out; 2234 } 2235 info->tcpi_lost = tp->lost_out; 2236 info->tcpi_retrans = tp->retrans_out; 2237 info->tcpi_fackets = tp->fackets_out; 2238 2239 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2240 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2241 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2242 2243 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 2244 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 2245 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; 2246 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; 2247 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 2248 
info->tcpi_snd_cwnd = tp->snd_cwnd; 2249 info->tcpi_advmss = tp->advmss; 2250 info->tcpi_reordering = tp->reordering; 2251 2252 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; 2253 info->tcpi_rcv_space = tp->rcvq_space.space; 2254 2255 info->tcpi_total_retrans = tp->total_retrans; 2256} 2257 2258EXPORT_SYMBOL_GPL(tcp_get_info); 2259 2260static int do_tcp_getsockopt(struct sock *sk, int level, 2261 int optname, char __user *optval, int __user *optlen) 2262{ 2263 struct inet_connection_sock *icsk = inet_csk(sk); 2264 struct tcp_sock *tp = tcp_sk(sk); 2265 int val, len; 2266 2267 if (get_user(len, optlen)) 2268 return -EFAULT; 2269 2270 len = min_t(unsigned int, len, sizeof(int)); 2271 2272 if (len < 0) 2273 return -EINVAL; 2274 2275 switch (optname) { 2276 case TCP_MAXSEG: 2277 val = tp->mss_cache; 2278 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2279 val = tp->rx_opt.user_mss; 2280 break; 2281 case TCP_NODELAY: 2282 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2283 break; 2284 case TCP_CORK: 2285 val = !!(tp->nonagle&TCP_NAGLE_CORK); 2286 break; 2287 case TCP_KEEPIDLE: 2288 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ; 2289 break; 2290 case TCP_KEEPINTVL: 2291 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ; 2292 break; 2293 case TCP_KEEPCNT: 2294 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; 2295 break; 2296 case TCP_SYNCNT: 2297 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 2298 break; 2299 case TCP_LINGER2: 2300 val = tp->linger2; 2301 if (val >= 0) 2302 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2303 break; 2304 case TCP_DEFER_ACCEPT: 2305 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 : 2306 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); 2307 break; 2308 case TCP_WINDOW_CLAMP: 2309 val = tp->window_clamp; 2310 break; 2311 case TCP_INFO: { 2312 struct tcp_info info; 2313 2314 if (get_user(len, optlen)) 2315 return -EFAULT; 2316 2317 tcp_get_info(sk, &info); 2318 2319 len = min_t(unsigned int, len, sizeof(info)); 2320 if (put_user(len, optlen)) 2321 return -EFAULT; 2322 if (copy_to_user(optval, &info, len)) 2323 return -EFAULT; 2324 return 0; 2325 } 2326 case TCP_QUICKACK: 2327 val = !icsk->icsk_ack.pingpong; 2328 break; 2329 2330 case TCP_CONGESTION: 2331 if (get_user(len, optlen)) 2332 return -EFAULT; 2333 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 2334 if (put_user(len, optlen)) 2335 return -EFAULT; 2336 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 2337 return -EFAULT; 2338 return 0; 2339 default: 2340 return -ENOPROTOOPT; 2341 } 2342 2343 if (put_user(len, optlen)) 2344 return -EFAULT; 2345 if (copy_to_user(optval, &val, len)) 2346 return -EFAULT; 2347 return 0; 2348} 2349 2350int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 2351 int __user *optlen) 2352{ 2353 struct inet_connection_sock *icsk = inet_csk(sk); 2354 2355 if (level != SOL_TCP) 2356 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 2357 optval, optlen); 2358 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2359} 2360 2361#ifdef CONFIG_COMPAT 2362int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 2363 char __user *optval, int __user *optlen) 2364{ 2365 if (level != SOL_TCP) 2366 return inet_csk_compat_getsockopt(sk, level, optname, 2367 optval, optlen); 2368 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2369} 2370 2371EXPORT_SYMBOL(compat_tcp_getsockopt); 2372#endif 2373 2374struct sk_buff 
*tcp_tso_segment(struct sk_buff *skb, int features) 2375{ 2376 struct sk_buff *segs = ERR_PTR(-EINVAL); 2377 struct tcphdr *th; 2378 unsigned thlen; 2379 unsigned int seq; 2380 __be32 delta; 2381 unsigned int oldlen; 2382 unsigned int len; 2383 2384 if (!pskb_may_pull(skb, sizeof(*th))) 2385 goto out; 2386 2387 th = tcp_hdr(skb); 2388 thlen = th->doff * 4; 2389 if (thlen < sizeof(*th)) 2390 goto out; 2391 2392 if (!pskb_may_pull(skb, thlen)) 2393 goto out; 2394 2395 oldlen = (u16)~skb->len; 2396 __skb_pull(skb, thlen); 2397 2398 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { 2399 /* Packet is from an untrusted source, reset gso_segs. */ 2400 int type = skb_shinfo(skb)->gso_type; 2401 int mss; 2402 2403 if (unlikely(type & 2404 ~(SKB_GSO_TCPV4 | 2405 SKB_GSO_DODGY | 2406 SKB_GSO_TCP_ECN | 2407 SKB_GSO_TCPV6 | 2408 0) || 2409 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) 2410 goto out; 2411 2412 mss = skb_shinfo(skb)->gso_size; 2413 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); 2414 2415 segs = NULL; 2416 goto out; 2417 } 2418 2419 segs = skb_segment(skb, features); 2420 if (IS_ERR(segs)) 2421 goto out; 2422 2423 len = skb_shinfo(skb)->gso_size; 2424 delta = htonl(oldlen + (thlen + len)); 2425 2426 skb = segs; 2427 th = tcp_hdr(skb); 2428 seq = ntohl(th->seq); 2429 2430 do { 2431 th->fin = th->psh = 0; 2432 2433 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 2434 (__force u32)delta)); 2435 if (skb->ip_summed != CHECKSUM_PARTIAL) 2436 th->check = 2437 csum_fold(csum_partial(skb_transport_header(skb), 2438 thlen, skb->csum)); 2439 2440 seq += len; 2441 skb = skb->next; 2442 th = tcp_hdr(skb); 2443 2444 th->seq = htonl(seq); 2445 th->cwr = 0; 2446 } while (skb->next); 2447 2448 delta = htonl(oldlen + (skb->tail - skb->transport_header) + 2449 skb->data_len); 2450 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 2451 (__force u32)delta)); 2452 if (skb->ip_summed != CHECKSUM_PARTIAL) 2453 th->check = csum_fold(csum_partial(skb_transport_header(skb), 2454 thlen, skb->csum)); 2455 2456out: 2457 return segs; 2458} 2459EXPORT_SYMBOL(tcp_tso_segment); 2460 2461#ifdef CONFIG_TCP_MD5SIG 2462static unsigned long tcp_md5sig_users; 2463static struct tcp_md5sig_pool **tcp_md5sig_pool; 2464static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2465 2466static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2467{ 2468 int cpu; 2469 for_each_possible_cpu(cpu) { 2470 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); 2471 if (p) { 2472 if (p->md5_desc.tfm) 2473 crypto_free_hash(p->md5_desc.tfm); 2474 kfree(p); 2475 p = NULL; 2476 } 2477 } 2478 free_percpu(pool); 2479} 2480 2481void tcp_free_md5sig_pool(void) 2482{ 2483 struct tcp_md5sig_pool **pool = NULL; 2484 2485 spin_lock_bh(&tcp_md5sig_pool_lock); 2486 if (--tcp_md5sig_users == 0) { 2487 pool = tcp_md5sig_pool; 2488 tcp_md5sig_pool = NULL; 2489 } 2490 spin_unlock_bh(&tcp_md5sig_pool_lock); 2491 if (pool) 2492 __tcp_free_md5sig_pool(pool); 2493} 2494 2495EXPORT_SYMBOL(tcp_free_md5sig_pool); 2496 2497static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) 2498{ 2499 int cpu; 2500 struct tcp_md5sig_pool **pool; 2501 2502 pool = alloc_percpu(struct tcp_md5sig_pool *); 2503 if (!pool) 2504 return NULL; 2505 2506 for_each_possible_cpu(cpu) { 2507 struct tcp_md5sig_pool *p; 2508 struct crypto_hash *hash; 2509 2510 p = kzalloc(sizeof(*p), GFP_KERNEL); 2511 if (!p) 2512 goto out_free; 2513 *per_cpu_ptr(pool, cpu) = p; 2514 2515 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2516 if (!hash || 
IS_ERR(hash)) 2517 goto out_free; 2518 2519 p->md5_desc.tfm = hash; 2520 } 2521 return pool; 2522out_free: 2523 __tcp_free_md5sig_pool(pool); 2524 return NULL; 2525} 2526 2527struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) 2528{ 2529 struct tcp_md5sig_pool **pool; 2530 int alloc = 0; 2531 2532retry: 2533 spin_lock_bh(&tcp_md5sig_pool_lock); 2534 pool = tcp_md5sig_pool; 2535 if (tcp_md5sig_users++ == 0) { 2536 alloc = 1; 2537 spin_unlock_bh(&tcp_md5sig_pool_lock); 2538 } else if (!pool) { 2539 tcp_md5sig_users--; 2540 spin_unlock_bh(&tcp_md5sig_pool_lock); 2541 cpu_relax(); 2542 goto retry; 2543 } else 2544 spin_unlock_bh(&tcp_md5sig_pool_lock); 2545 2546 if (alloc) { 2547 /* we cannot hold spinlock here because this may sleep. */ 2548 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); 2549 spin_lock_bh(&tcp_md5sig_pool_lock); 2550 if (!p) { 2551 tcp_md5sig_users--; 2552 spin_unlock_bh(&tcp_md5sig_pool_lock); 2553 return NULL; 2554 } 2555 pool = tcp_md5sig_pool; 2556 if (pool) { 2557 /* oops, it has already been assigned. */ 2558 spin_unlock_bh(&tcp_md5sig_pool_lock); 2559 __tcp_free_md5sig_pool(p); 2560 } else { 2561 tcp_md5sig_pool = pool = p; 2562 spin_unlock_bh(&tcp_md5sig_pool_lock); 2563 } 2564 } 2565 return pool; 2566} 2567 2568EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 2569 2570struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2571{ 2572 struct tcp_md5sig_pool **p; 2573 spin_lock_bh(&tcp_md5sig_pool_lock); 2574 p = tcp_md5sig_pool; 2575 if (p) 2576 tcp_md5sig_users++; 2577 spin_unlock_bh(&tcp_md5sig_pool_lock); 2578 return (p ? *per_cpu_ptr(p, cpu) : NULL); 2579} 2580 2581EXPORT_SYMBOL(__tcp_get_md5sig_pool); 2582 2583void __tcp_put_md5sig_pool(void) 2584{ 2585 tcp_free_md5sig_pool(); 2586} 2587 2588EXPORT_SYMBOL(__tcp_put_md5sig_pool); 2589#endif 2590 2591void tcp_done(struct sock *sk) 2592{ 2593 if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 2594 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 2595 2596 tcp_set_state(sk, TCP_CLOSE); 2597 tcp_clear_xmit_timers(sk); 2598 2599 sk->sk_shutdown = SHUTDOWN_MASK; 2600 2601 if (!sock_flag(sk, SOCK_DEAD)) 2602 sk->sk_state_change(sk); 2603 else 2604 inet_csk_destroy_sock(sk); 2605} 2606EXPORT_SYMBOL_GPL(tcp_done); 2607 2608extern struct tcp_congestion_ops tcp_reno; 2609 2610static __initdata unsigned long thash_entries; 2611static int __init set_thash_entries(char *str) 2612{ 2613 if (!str) 2614 return 0; 2615 thash_entries = simple_strtoul(str, &str, 0); 2616 return 1; 2617} 2618__setup("thash_entries=", set_thash_entries); 2619 2620void __init tcp_init(void) 2621{ 2622 struct sk_buff *skb = NULL; 2623 unsigned long limit; 2624 int order, i, max_share; 2625 2626 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 2627 2628 tcp_hashinfo.bind_bucket_cachep = 2629 kmem_cache_create("tcp_bind_bucket", 2630 sizeof(struct inet_bind_bucket), 0, 2631 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2632 2633 /* Size and allocate the main established and bind bucket 2634 * hash tables. 2635 * 2636 * The methodology is similar to that of the buffer cache. 2637 */ 2638 tcp_hashinfo.ehash = 2639 alloc_large_system_hash("TCP established", 2640 sizeof(struct inet_ehash_bucket), 2641 thash_entries, 2642 (num_physpages >= 128 * 1024) ? 2643 13 : 15, 2644 0, 2645 &tcp_hashinfo.ehash_size, 2646 NULL, 2647 thash_entries ? 
0 : 512 * 1024); 2648 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; 2649 for (i = 0; i < tcp_hashinfo.ehash_size; i++) { 2650 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); 2651 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain); 2652 } 2653 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 2654 panic("TCP: failed to alloc ehash_locks"); 2655 tcp_hashinfo.bhash = 2656 alloc_large_system_hash("TCP bind", 2657 sizeof(struct inet_bind_hashbucket), 2658 tcp_hashinfo.ehash_size, 2659 (num_physpages >= 128 * 1024) ? 2660 13 : 15, 2661 0, 2662 &tcp_hashinfo.bhash_size, 2663 NULL, 2664 64 * 1024); 2665 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; 2666 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 2667 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 2668 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 2669 } 2670 2671 /* Try to be a bit smarter and adjust defaults depending 2672 * on available memory. 2673 */ 2674 for (order = 0; ((1 << order) << PAGE_SHIFT) < 2675 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket)); 2676 order++) 2677 ; 2678 if (order >= 4) { 2679 tcp_death_row.sysctl_max_tw_buckets = 180000; 2680 sysctl_tcp_max_orphans = 4096 << (order - 4); 2681 sysctl_max_syn_backlog = 1024; 2682 } else if (order < 3) { 2683 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order); 2684 sysctl_tcp_max_orphans >>= (3 - order); 2685 sysctl_max_syn_backlog = 128; 2686 } 2687 2688 /* Set the pressure threshold to be a fraction of global memory that 2689 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of 2690 * memory, with a floor of 128 pages. 2691 */ 2692 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); 2693 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); 2694 limit = max(limit, 128UL); 2695 sysctl_tcp_mem[0] = limit / 4 * 3; 2696 sysctl_tcp_mem[1] = limit; 2697 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; 2698 2699 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 2700 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); 2701 max_share = min(4UL*1024*1024, limit); 2702 2703 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 2704 sysctl_tcp_wmem[1] = 16*1024; 2705 sysctl_tcp_wmem[2] = max(64*1024, max_share); 2706 2707 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 2708 sysctl_tcp_rmem[1] = 87380; 2709 sysctl_tcp_rmem[2] = max(87380, max_share); 2710 2711 printk(KERN_INFO "TCP: Hash tables configured " 2712 "(established %d bind %d)\n", 2713 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size); 2714 2715 tcp_register_congestion_control(&tcp_reno); 2716} 2717 2718EXPORT_SYMBOL(tcp_close); 2719EXPORT_SYMBOL(tcp_disconnect); 2720EXPORT_SYMBOL(tcp_getsockopt); 2721EXPORT_SYMBOL(tcp_ioctl); 2722EXPORT_SYMBOL(tcp_poll); 2723EXPORT_SYMBOL(tcp_read_sock); 2724EXPORT_SYMBOL(tcp_recvmsg); 2725EXPORT_SYMBOL(tcp_sendmsg); 2726EXPORT_SYMBOL(tcp_splice_read); 2727EXPORT_SYMBOL(tcp_sendpage); 2728EXPORT_SYMBOL(tcp_setsockopt); 2729EXPORT_SYMBOL(tcp_shutdown); 2730EXPORT_SYMBOL(tcp_statistics); 2731
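
/*
 * Editor's addendum (illustrative only, not part of the kernel build):
 * a minimal user-space sketch of how the TCP_INFO and TCP_CONGESTION
 * read paths implemented by tcp_get_info() and do_tcp_getsockopt()
 * above are typically exercised. "fd" is assumed to be a connected TCP
 * socket, the 16-byte buffer mirrors TCP_CA_NAME_MAX, and the
 * TCP_CONGESTION option requires reasonably recent libc headers.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static void dump_tcp_state(int fd)
 *	{
 *		struct tcp_info ti;
 *		char ca_name[16];
 *		socklen_t len = sizeof(ti);
 *
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *			printf("rtt %u us, cwnd %u, total retrans %u\n",
 *			       ti.tcpi_rtt, ti.tcpi_snd_cwnd,
 *			       ti.tcpi_total_retrans);
 *
 *		len = sizeof(ca_name);
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *			       ca_name, &len) == 0)
 *			printf("congestion control: %.*s\n",
 *			       (int)len, ca_name);
 *	}
 */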