tcp.c revision 52bf376c63eebe72e862a1a6e713976b038c3f50
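/*
 * For orientation: the listing below is the kernel-side implementation of
 * the TCP socket calls.  On an IPv4 socket, connect() is served by
 * tcp_v4_connect() (in tcp_ipv4.c, not in this file), send()/sendmsg() by
 * tcp_sendmsg(), recv()/recvmsg() by tcp_recvmsg(), poll() by tcp_poll(),
 * ioctl() by tcp_ioctl(), setsockopt()/getsockopt() by tcp_setsockopt() and
 * tcp_getsockopt(), shutdown() by tcp_shutdown() and close() by tcp_close(),
 * all defined below.  A minimal userspace sketch, for illustration only
 * (error handling omitted, the address is an example):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in sa = { .sin_family = AF_INET,
 *					  .sin_port   = htons(80) };
 *		char buf[4096];
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
 *		connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *		send(fd, "GET / HTTP/1.0\r\n\r\n", 18, 0);
 *		shutdown(fd, SHUT_WR);
 *		while (recv(fd, buf, sizeof(buf), 0) > 0)
 *			;
 *		close(fd);
 *		return 0;
 *	}
 */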
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 14 * Florian La Roche, <flla@stud.uni-sb.de> 15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 16 * Linus Torvalds, <torvalds@cs.helsinki.fi> 17 * Alan Cox, <gw4pts@gw4pts.ampr.org> 18 * Matthew Dillon, <dillon@apollo.west.oic.com> 19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 20 * Jorge Cwik, <jorge@laser.satlink.net> 21 * 22 * Fixes: 23 * Alan Cox : Numerous verify_area() calls 24 * Alan Cox : Set the ACK bit on a reset 25 * Alan Cox : Stopped it crashing if it closed while 26 * sk->inuse=1 and was trying to connect 27 * (tcp_err()). 28 * Alan Cox : All icmp error handling was broken 29 * pointers passed where wrong and the 30 * socket was looked up backwards. Nobody 31 * tested any icmp error code obviously. 32 * Alan Cox : tcp_err() now handled properly. It 33 * wakes people on errors. poll 34 * behaves and the icmp error race 35 * has gone by moving it into sock.c 36 * Alan Cox : tcp_send_reset() fixed to work for 37 * everything not just packets for 38 * unknown sockets. 39 * Alan Cox : tcp option processing. 40 * Alan Cox : Reset tweaked (still not 100%) [Had 41 * syn rule wrong] 42 * Herp Rosmanith : More reset fixes 43 * Alan Cox : No longer acks invalid rst frames. 44 * Acking any kind of RST is right out. 45 * Alan Cox : Sets an ignore me flag on an rst 46 * receive otherwise odd bits of prattle 47 * escape still 48 * Alan Cox : Fixed another acking RST frame bug. 49 * Should stop LAN workplace lockups. 50 * Alan Cox : Some tidyups using the new skb list 51 * facilities 52 * Alan Cox : sk->keepopen now seems to work 53 * Alan Cox : Pulls options out correctly on accepts 54 * Alan Cox : Fixed assorted sk->rqueue->next errors 55 * Alan Cox : PSH doesn't end a TCP read. Switched a 56 * bit to skb ops. 57 * Alan Cox : Tidied tcp_data to avoid a potential 58 * nasty. 59 * Alan Cox : Added some better commenting, as the 60 * tcp is hard to follow 61 * Alan Cox : Removed incorrect check for 20 * psh 62 * Michael O'Reilly : ack < copied bug fix. 63 * Johannes Stille : Misc tcp fixes (not all in yet). 64 * Alan Cox : FIN with no memory -> CRASH 65 * Alan Cox : Added socket option proto entries. 66 * Also added awareness of them to accept. 67 * Alan Cox : Added TCP options (SOL_TCP) 68 * Alan Cox : Switched wakeup calls to callbacks, 69 * so the kernel can layer network 70 * sockets. 71 * Alan Cox : Use ip_tos/ip_ttl settings. 72 * Alan Cox : Handle FIN (more) properly (we hope). 73 * Alan Cox : RST frames sent on unsynchronised 74 * state ack error. 75 * Alan Cox : Put in missing check for SYN bit. 76 * Alan Cox : Added tcp_select_window() aka NET2E 77 * window non shrink trick. 
78 * Alan Cox : Added a couple of small NET2E timer 79 * fixes 80 * Charles Hedrick : TCP fixes 81 * Toomas Tamm : TCP window fixes 82 * Alan Cox : Small URG fix to rlogin ^C ack fight 83 * Charles Hedrick : Rewrote most of it to actually work 84 * Linus : Rewrote tcp_read() and URG handling 85 * completely 86 * Gerhard Koerting: Fixed some missing timer handling 87 * Matthew Dillon : Reworked TCP machine states as per RFC 88 * Gerhard Koerting: PC/TCP workarounds 89 * Adam Caldwell : Assorted timer/timing errors 90 * Matthew Dillon : Fixed another RST bug 91 * Alan Cox : Move to kernel side addressing changes. 92 * Alan Cox : Beginning work on TCP fastpathing 93 * (not yet usable) 94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 95 * Alan Cox : TCP fast path debugging 96 * Alan Cox : Window clamping 97 * Michael Riepe : Bug in tcp_check() 98 * Matt Dillon : More TCP improvements and RST bug fixes 99 * Matt Dillon : Yet more small nasties remove from the 100 * TCP code (Be very nice to this man if 101 * tcp finally works 100%) 8) 102 * Alan Cox : BSD accept semantics. 103 * Alan Cox : Reset on closedown bug. 104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 105 * Michael Pall : Handle poll() after URG properly in 106 * all cases. 107 * Michael Pall : Undo the last fix in tcp_read_urg() 108 * (multi URG PUSH broke rlogin). 109 * Michael Pall : Fix the multi URG PUSH problem in 110 * tcp_readable(), poll() after URG 111 * works now. 112 * Michael Pall : recv(...,MSG_OOB) never blocks in the 113 * BSD api. 114 * Alan Cox : Changed the semantics of sk->socket to 115 * fix a race and a signal problem with 116 * accept() and async I/O. 117 * Alan Cox : Relaxed the rules on tcp_sendto(). 118 * Yury Shevchuk : Really fixed accept() blocking problem. 119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 120 * clients/servers which listen in on 121 * fixed ports. 122 * Alan Cox : Cleaned the above up and shrank it to 123 * a sensible code size. 124 * Alan Cox : Self connect lockup fix. 125 * Alan Cox : No connect to multicast. 126 * Ross Biro : Close unaccepted children on master 127 * socket close. 128 * Alan Cox : Reset tracing code. 129 * Alan Cox : Spurious resets on shutdown. 130 * Alan Cox : Giant 15 minute/60 second timer error 131 * Alan Cox : Small whoops in polling before an 132 * accept. 133 * Alan Cox : Kept the state trace facility since 134 * it's handy for debugging. 135 * Alan Cox : More reset handler fixes. 136 * Alan Cox : Started rewriting the code based on 137 * the RFC's for other useful protocol 138 * references see: Comer, KA9Q NOS, and 139 * for a reference on the difference 140 * between specifications and how BSD 141 * works see the 4.4lite source. 142 * A.N.Kuznetsov : Don't time wait on completion of tidy 143 * close. 144 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 145 * Linus Torvalds : Fixed BSD port reuse to work first syn 146 * Alan Cox : Reimplemented timers as per the RFC 147 * and using multiple timers for sanity. 148 * Alan Cox : Small bug fixes, and a lot of new 149 * comments. 150 * Alan Cox : Fixed dual reader crash by locking 151 * the buffers (much like datagram.c) 152 * Alan Cox : Fixed stuck sockets in probe. A probe 153 * now gets fed up of retrying without 154 * (even a no space) answer. 155 * Alan Cox : Extracted closing code better 156 * Alan Cox : Fixed the closing state machine to 157 * resemble the RFC. 158 * Alan Cox : More 'per spec' fixes. 159 * Jorge Cwik : Even faster checksumming. 
160 * Alan Cox : tcp_data() doesn't ack illegal PSH 161 * only frames. At least one pc tcp stack 162 * generates them. 163 * Alan Cox : Cache last socket. 164 * Alan Cox : Per route irtt. 165 * Matt Day : poll()->select() match BSD precisely on error 166 * Alan Cox : New buffers 167 * Marc Tamsky : Various sk->prot->retransmits and 168 * sk->retransmits misupdating fixed. 169 * Fixed tcp_write_timeout: stuck close, 170 * and TCP syn retries gets used now. 171 * Mark Yarvis : In tcp_read_wakeup(), don't send an 172 * ack if state is TCP_CLOSED. 173 * Alan Cox : Look up device on a retransmit - routes may 174 * change. Doesn't yet cope with MSS shrink right 175 * but it's a start! 176 * Marc Tamsky : Closing in closing fixes. 177 * Mike Shaver : RFC1122 verifications. 178 * Alan Cox : rcv_saddr errors. 179 * Alan Cox : Block double connect(). 180 * Alan Cox : Small hooks for enSKIP. 181 * Alexey Kuznetsov: Path MTU discovery. 182 * Alan Cox : Support soft errors. 183 * Alan Cox : Fix MTU discovery pathological case 184 * when the remote claims no mtu! 185 * Marc Tamsky : TCP_CLOSE fix. 186 * Colin (G3TNE) : Send a reset on syn ack replies in 187 * window but wrong (fixes NT lpd problems) 188 * Pedro Roque : Better TCP window handling, delayed ack. 189 * Joerg Reuter : No modification of locked buffers in 190 * tcp_do_retransmit() 191 * Eric Schenk : Changed receiver side silly window 192 * avoidance algorithm to BSD style 193 * algorithm. This doubles throughput 194 * against machines running Solaris, 195 * and seems to result in general 196 * improvement. 197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 198 * Willy Konynenberg : Transparent proxying support. 199 * Mike McLagan : Routing by source 200 * Keith Owens : Do proper merging with partial SKB's in 201 * tcp_do_sendmsg to avoid burstiness. 202 * Eric Schenk : Fix fast close down bug with 203 * shutdown() followed by close(). 204 * Andi Kleen : Make poll agree with SIGIO 205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 206 * lingertime == 0 (RFC 793 ABORT Call) 207 * Hirokazu Takahashi : Use copy_from_user() instead of 208 * csum_and_copy_from_user() if possible. 209 * 210 * This program is free software; you can redistribute it and/or 211 * modify it under the terms of the GNU General Public License 212 * as published by the Free Software Foundation; either version 213 * 2 of the License, or(at your option) any later version. 214 * 215 * Description of States: 216 * 217 * TCP_SYN_SENT sent a connection request, waiting for ack 218 * 219 * TCP_SYN_RECV received a connection request, sent ack, 220 * waiting for final ack in three-way handshake. 221 * 222 * TCP_ESTABLISHED connection established 223 * 224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 225 * transmission of remaining buffered data 226 * 227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 228 * to shutdown 229 * 230 * TCP_CLOSING both sides have shutdown but we still have 231 * data we have to finish sending 232 * 233 * TCP_TIME_WAIT timeout to catch resent junk before entering 234 * closed, can only be entered from FIN_WAIT2 235 * or CLOSING. 
 *				Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;		/* Current allocated memory. */
atomic_t tcp_sockets_allocated;		/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked.  We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right.  But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all.  But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1.  POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c).  It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT.  One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions.  Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma.  I would prefer, if POLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN.  BTW examples
	 * given in Stevens' books assume exactly this behaviour, it
	 * explains why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE.  Check for TCP_CLOSE is added.  The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition.  If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker.  If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue.
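	 *
	 * For illustration, a userspace sketch (not part of this file) of
	 * how the event mask built in tcp_poll() above and the SIOCINQ
	 * count computed here are typically consumed.  On TCP sockets
	 * SIOCINQ has the same value as FIONREAD; POLLRDHUP (set above on
	 * RCV_SHUTDOWN) needs _GNU_SOURCE.  Error handling omitted:
	 *
	 *	#define _GNU_SOURCE
	 *	#include <poll.h>
	 *	#include <stdio.h>
	 *	#include <sys/ioctl.h>
	 *
	 *	static void wait_for_tcp_event(int fd)
	 *	{
	 *		struct pollfd pfd = { .fd = fd,
	 *				      .events = POLLIN | POLLRDHUP };
	 *		int queued = 0;
	 *
	 *		poll(&pfd, 1, -1);
	 *		if (pfd.revents & POLLIN) {
	 *			ioctl(fd, FIONREAD, &queued);
	 *			printf("%d bytes ready to read\n", queued);
	 *		}
	 *		if (pfd.revents & POLLRDHUP)
	 *			printf("peer shut down its send side\n");
	 *		if (pfd.revents & POLLHUP)
	 *			printf("both directions are shut down\n");
	 *	}
	 *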
*/ 425 if (answ && !skb_queue_empty(&sk->sk_receive_queue)) 426 answ -= 427 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin; 428 } else 429 answ = tp->urg_seq - tp->copied_seq; 430 release_sock(sk); 431 break; 432 case SIOCATMARK: 433 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 434 break; 435 case SIOCOUTQ: 436 if (sk->sk_state == TCP_LISTEN) 437 return -EINVAL; 438 439 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 440 answ = 0; 441 else 442 answ = tp->write_seq - tp->snd_una; 443 break; 444 default: 445 return -ENOIOCTLCMD; 446 }; 447 448 return put_user(answ, (int __user *)arg); 449} 450 451static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 452{ 453 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 454 tp->pushed_seq = tp->write_seq; 455} 456 457static inline int forced_push(struct tcp_sock *tp) 458{ 459 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 460} 461 462static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, 463 struct sk_buff *skb) 464{ 465 skb->csum = 0; 466 TCP_SKB_CB(skb)->seq = tp->write_seq; 467 TCP_SKB_CB(skb)->end_seq = tp->write_seq; 468 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 469 TCP_SKB_CB(skb)->sacked = 0; 470 skb_header_release(skb); 471 __skb_queue_tail(&sk->sk_write_queue, skb); 472 sk_charge_skb(sk, skb); 473 if (!sk->sk_send_head) 474 sk->sk_send_head = skb; 475 if (tp->nonagle & TCP_NAGLE_PUSH) 476 tp->nonagle &= ~TCP_NAGLE_PUSH; 477} 478 479static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 480 struct sk_buff *skb) 481{ 482 if (flags & MSG_OOB) { 483 tp->urg_mode = 1; 484 tp->snd_up = tp->write_seq; 485 TCP_SKB_CB(skb)->sacked |= TCPCB_URG; 486 } 487} 488 489static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, 490 int mss_now, int nonagle) 491{ 492 if (sk->sk_send_head) { 493 struct sk_buff *skb = sk->sk_write_queue.prev; 494 if (!(flags & MSG_MORE) || forced_push(tp)) 495 tcp_mark_push(tp, skb); 496 tcp_mark_urg(tp, flags, skb); 497 __tcp_push_pending_frames(sk, tp, mss_now, 498 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 499 } 500} 501 502static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, 503 size_t psize, int flags) 504{ 505 struct tcp_sock *tp = tcp_sk(sk); 506 int mss_now, size_goal; 507 int err; 508 ssize_t copied; 509 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 510 511 /* Wait for a connection to finish. 
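	 *
	 * For illustration: this function is normally reached through the
	 * ->sendpage() path, e.g. sendfile(2) with a TCP socket as the
	 * destination (see tcp_sendpage() below, which falls back to
	 * sock_no_sendpage() when the route lacks SG/checksum offload).
	 * A userspace sketch, not part of this file; error handling omitted:
	 *
	 *	#include <fcntl.h>
	 *	#include <sys/sendfile.h>
	 *	#include <sys/stat.h>
	 *	#include <unistd.h>
	 *
	 *	static off_t send_file_over_tcp(int sock_fd, const char *path)
	 *	{
	 *		struct stat st;
	 *		off_t off = 0;
	 *		int file_fd = open(path, O_RDONLY);
	 *
	 *		fstat(file_fd, &st);
	 *		while (off < st.st_size &&
	 *		       sendfile(sock_fd, file_fd, &off,
	 *				st.st_size - off) > 0)
	 *			;
	 *		close(file_fd);
	 *		return off;
	 *	}
	 *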
*/ 512 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 513 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 514 goto out_err; 515 516 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 517 518 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 519 size_goal = tp->xmit_size_goal; 520 copied = 0; 521 522 err = -EPIPE; 523 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 524 goto do_error; 525 526 while (psize > 0) { 527 struct sk_buff *skb = sk->sk_write_queue.prev; 528 struct page *page = pages[poffset / PAGE_SIZE]; 529 int copy, i, can_coalesce; 530 int offset = poffset % PAGE_SIZE; 531 int size = min_t(size_t, psize, PAGE_SIZE - offset); 532 533 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) { 534new_segment: 535 if (!sk_stream_memory_free(sk)) 536 goto wait_for_sndbuf; 537 538 skb = sk_stream_alloc_pskb(sk, 0, 0, 539 sk->sk_allocation); 540 if (!skb) 541 goto wait_for_memory; 542 543 skb_entail(sk, tp, skb); 544 copy = size_goal; 545 } 546 547 if (copy > size) 548 copy = size; 549 550 i = skb_shinfo(skb)->nr_frags; 551 can_coalesce = skb_can_coalesce(skb, i, page, offset); 552 if (!can_coalesce && i >= MAX_SKB_FRAGS) { 553 tcp_mark_push(tp, skb); 554 goto new_segment; 555 } 556 if (!sk_stream_wmem_schedule(sk, copy)) 557 goto wait_for_memory; 558 559 if (can_coalesce) { 560 skb_shinfo(skb)->frags[i - 1].size += copy; 561 } else { 562 get_page(page); 563 skb_fill_page_desc(skb, i, page, offset, copy); 564 } 565 566 skb->len += copy; 567 skb->data_len += copy; 568 skb->truesize += copy; 569 sk->sk_wmem_queued += copy; 570 sk->sk_forward_alloc -= copy; 571 skb->ip_summed = CHECKSUM_PARTIAL; 572 tp->write_seq += copy; 573 TCP_SKB_CB(skb)->end_seq += copy; 574 skb_shinfo(skb)->gso_segs = 0; 575 576 if (!copied) 577 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 578 579 copied += copy; 580 poffset += copy; 581 if (!(psize -= copy)) 582 goto out; 583 584 if (skb->len < mss_now || (flags & MSG_OOB)) 585 continue; 586 587 if (forced_push(tp)) { 588 tcp_mark_push(tp, skb); 589 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); 590 } else if (skb == sk->sk_send_head) 591 tcp_push_one(sk, mss_now); 592 continue; 593 594wait_for_sndbuf: 595 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 596wait_for_memory: 597 if (copied) 598 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 599 600 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 601 goto do_error; 602 603 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 604 size_goal = tp->xmit_size_goal; 605 } 606 607out: 608 if (copied) 609 tcp_push(sk, tp, flags, mss_now, tp->nonagle); 610 return copied; 611 612do_error: 613 if (copied) 614 goto out; 615out_err: 616 return sk_stream_error(sk, flags, err); 617} 618 619ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, 620 size_t size, int flags) 621{ 622 ssize_t res; 623 struct sock *sk = sock->sk; 624 625 if (!(sk->sk_route_caps & NETIF_F_SG) || 626 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 627 return sock_no_sendpage(sock, page, offset, size, flags); 628 629 lock_sock(sk); 630 TCP_CHECK_TIMER(sk); 631 res = do_tcp_sendpages(sk, &page, offset, size, flags); 632 TCP_CHECK_TIMER(sk); 633 release_sock(sk); 634 return res; 635} 636 637#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 638#define TCP_OFF(sk) (sk->sk_sndmsg_off) 639 640static inline int select_size(struct sock *sk, struct tcp_sock *tp) 641{ 642 int tmp = tp->mss_cache; 643 644 if (sk->sk_route_caps & NETIF_F_SG) { 645 if (sk_can_gso(sk)) 646 tmp = 0; 647 else { 648 int pgbreak = 
SKB_MAX_HEAD(MAX_TCP_HEADER); 649 650 if (tmp >= pgbreak && 651 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) 652 tmp = pgbreak; 653 } 654 } 655 656 return tmp; 657} 658 659int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 660 size_t size) 661{ 662 struct iovec *iov; 663 struct tcp_sock *tp = tcp_sk(sk); 664 struct sk_buff *skb; 665 int iovlen, flags; 666 int mss_now, size_goal; 667 int err, copied; 668 long timeo; 669 670 lock_sock(sk); 671 TCP_CHECK_TIMER(sk); 672 673 flags = msg->msg_flags; 674 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 675 676 /* Wait for a connection to finish. */ 677 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 678 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 679 goto out_err; 680 681 /* This should be in poll */ 682 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 683 684 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 685 size_goal = tp->xmit_size_goal; 686 687 /* Ok commence sending. */ 688 iovlen = msg->msg_iovlen; 689 iov = msg->msg_iov; 690 copied = 0; 691 692 err = -EPIPE; 693 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 694 goto do_error; 695 696 while (--iovlen >= 0) { 697 int seglen = iov->iov_len; 698 unsigned char __user *from = iov->iov_base; 699 700 iov++; 701 702 while (seglen > 0) { 703 int copy; 704 705 skb = sk->sk_write_queue.prev; 706 707 if (!sk->sk_send_head || 708 (copy = size_goal - skb->len) <= 0) { 709 710new_segment: 711 /* Allocate new segment. If the interface is SG, 712 * allocate skb fitting to single page. 713 */ 714 if (!sk_stream_memory_free(sk)) 715 goto wait_for_sndbuf; 716 717 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), 718 0, sk->sk_allocation); 719 if (!skb) 720 goto wait_for_memory; 721 722 /* 723 * Check whether we can use HW checksum. 724 */ 725 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 726 skb->ip_summed = CHECKSUM_PARTIAL; 727 728 skb_entail(sk, tp, skb); 729 copy = size_goal; 730 } 731 732 /* Try to append data to the end of skb. */ 733 if (copy > seglen) 734 copy = seglen; 735 736 /* Where to copy to? */ 737 if (skb_tailroom(skb) > 0) { 738 /* We have some space in skb head. Superb! */ 739 if (copy > skb_tailroom(skb)) 740 copy = skb_tailroom(skb); 741 if ((err = skb_add_data(skb, from, copy)) != 0) 742 goto do_fault; 743 } else { 744 int merge = 0; 745 int i = skb_shinfo(skb)->nr_frags; 746 struct page *page = TCP_PAGE(sk); 747 int off = TCP_OFF(sk); 748 749 if (skb_can_coalesce(skb, i, page, off) && 750 off != PAGE_SIZE) { 751 /* We can extend the last page 752 * fragment. */ 753 merge = 1; 754 } else if (i == MAX_SKB_FRAGS || 755 (!i && 756 !(sk->sk_route_caps & NETIF_F_SG))) { 757 /* Need to add new fragment and cannot 758 * do this because interface is non-SG, 759 * or because all the page slots are 760 * busy. */ 761 tcp_mark_push(tp, skb); 762 goto new_segment; 763 } else if (page) { 764 if (off == PAGE_SIZE) { 765 put_page(page); 766 TCP_PAGE(sk) = page = NULL; 767 off = 0; 768 } 769 } else 770 off = 0; 771 772 if (copy > PAGE_SIZE - off) 773 copy = PAGE_SIZE - off; 774 775 if (!sk_stream_wmem_schedule(sk, copy)) 776 goto wait_for_memory; 777 778 if (!page) { 779 /* Allocate new cache page. */ 780 if (!(page = sk_stream_alloc_page(sk))) 781 goto wait_for_memory; 782 } 783 784 /* Time to copy data. We are close to 785 * the end! */ 786 err = skb_copy_to_page(sk, from, skb, page, 787 off, copy); 788 if (err) { 789 /* If this page was new, give it to the 790 * socket so it does not get leaked. 
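	 *
	 * For illustration: the surrounding tcp_sendmsg() loop walks
	 * msg->msg_iov and coalesces user data into the skb head or into
	 * page fragments as seen here.  A gather write from userspace that
	 * exercises this path, e.g. an application header plus payload in
	 * one call (a sketch, not part of this file; error handling
	 * omitted):
	 *
	 *	#include <string.h>
	 *	#include <sys/socket.h>
	 *	#include <sys/uio.h>
	 *
	 *	static ssize_t send_hdr_and_body(int fd, const char *hdr,
	 *					 const void *body, size_t len)
	 *	{
	 *		struct iovec iov[2] = {
	 *			{ .iov_base = (void *)hdr,  .iov_len = strlen(hdr) },
	 *			{ .iov_base = (void *)body, .iov_len = len },
	 *		};
	 *		struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
	 *
	 *		return sendmsg(fd, &msg, MSG_NOSIGNAL);
	 *	}
	 *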
791 */ 792 if (!TCP_PAGE(sk)) { 793 TCP_PAGE(sk) = page; 794 TCP_OFF(sk) = 0; 795 } 796 goto do_error; 797 } 798 799 /* Update the skb. */ 800 if (merge) { 801 skb_shinfo(skb)->frags[i - 1].size += 802 copy; 803 } else { 804 skb_fill_page_desc(skb, i, page, off, copy); 805 if (TCP_PAGE(sk)) { 806 get_page(page); 807 } else if (off + copy < PAGE_SIZE) { 808 get_page(page); 809 TCP_PAGE(sk) = page; 810 } 811 } 812 813 TCP_OFF(sk) = off + copy; 814 } 815 816 if (!copied) 817 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 818 819 tp->write_seq += copy; 820 TCP_SKB_CB(skb)->end_seq += copy; 821 skb_shinfo(skb)->gso_segs = 0; 822 823 from += copy; 824 copied += copy; 825 if ((seglen -= copy) == 0 && iovlen == 0) 826 goto out; 827 828 if (skb->len < mss_now || (flags & MSG_OOB)) 829 continue; 830 831 if (forced_push(tp)) { 832 tcp_mark_push(tp, skb); 833 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); 834 } else if (skb == sk->sk_send_head) 835 tcp_push_one(sk, mss_now); 836 continue; 837 838wait_for_sndbuf: 839 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 840wait_for_memory: 841 if (copied) 842 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 843 844 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 845 goto do_error; 846 847 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); 848 size_goal = tp->xmit_size_goal; 849 } 850 } 851 852out: 853 if (copied) 854 tcp_push(sk, tp, flags, mss_now, tp->nonagle); 855 TCP_CHECK_TIMER(sk); 856 release_sock(sk); 857 return copied; 858 859do_fault: 860 if (!skb->len) { 861 if (sk->sk_send_head == skb) 862 sk->sk_send_head = NULL; 863 __skb_unlink(skb, &sk->sk_write_queue); 864 sk_stream_free_skb(sk, skb); 865 } 866 867do_error: 868 if (copied) 869 goto out; 870out_err: 871 err = sk_stream_error(sk, flags, err); 872 TCP_CHECK_TIMER(sk); 873 release_sock(sk); 874 return err; 875} 876 877/* 878 * Handle reading urgent data. BSD has very simple semantics for 879 * this, no blocking and very strange errors 8) 880 */ 881 882static int tcp_recv_urg(struct sock *sk, long timeo, 883 struct msghdr *msg, int len, int flags, 884 int *addr_len) 885{ 886 struct tcp_sock *tp = tcp_sk(sk); 887 888 /* No URG data to read. */ 889 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 890 tp->urg_data == TCP_URG_READ) 891 return -EINVAL; /* Yes this is right ! */ 892 893 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 894 return -ENOTCONN; 895 896 if (tp->urg_data & TCP_URG_VALID) { 897 int err = 0; 898 char c = tp->urg_data; 899 900 if (!(flags & MSG_PEEK)) 901 tp->urg_data = TCP_URG_READ; 902 903 /* Read urgent data. */ 904 msg->msg_flags |= MSG_OOB; 905 906 if (len > 0) { 907 if (!(flags & MSG_TRUNC)) 908 err = memcpy_toiovec(msg->msg_iov, &c, 1); 909 len = 1; 910 } else 911 msg->msg_flags |= MSG_TRUNC; 912 913 return err ? -EFAULT : len; 914 } 915 916 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 917 return 0; 918 919 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 920 * the available implementations agree in this case: 921 * this call should never block, independent of the 922 * blocking state of the socket. 923 * Mike <pall@rz.uni-karlsruhe.de> 924 */ 925 return -EAGAIN; 926} 927 928/* Clean up the receive buffer for full frames taken by the user, 929 * then send an ACK if necessary. COPIED is the number of bytes 930 * tcp_recvmsg has given to the user so far, it speeds up the 931 * calculation of whether or not we must ACK for the sake of 932 * a window update. 
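 *
 * For illustration: tcp_recv_urg() above implements the BSD-style
 * out-of-band semantics (a single urgent byte, recv(...,MSG_OOB) never
 * blocking, SO_OOBINLINE changing delivery), and SIOCATMARK in tcp_ioctl()
 * reports whether the read pointer sits at the urgent mark.  A userspace
 * sketch, not part of this file; error handling omitted:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *
 *	static char read_urgent_byte(int fd)
 *	{
 *		char c = 0;
 *		int at_mark = 0;
 *
 *		ioctl(fd, SIOCATMARK, &at_mark);
 *		recv(fd, &c, 1, MSG_OOB);
 *		return c;
 *	}
 *
 * The sender marks the byte urgent with send(fd, "X", 1, MSG_OOB).
 *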
933 */ 934void tcp_cleanup_rbuf(struct sock *sk, int copied) 935{ 936 struct tcp_sock *tp = tcp_sk(sk); 937 int time_to_ack = 0; 938 939#if TCP_DEBUG 940 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 941 942 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 943#endif 944 945 if (inet_csk_ack_scheduled(sk)) { 946 const struct inet_connection_sock *icsk = inet_csk(sk); 947 /* Delayed ACKs frequently hit locked sockets during bulk 948 * receive. */ 949 if (icsk->icsk_ack.blocked || 950 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 951 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 952 /* 953 * If this read emptied read buffer, we send ACK, if 954 * connection is not bidirectional, user drained 955 * receive buffer and there was a small segment 956 * in queue. 957 */ 958 (copied > 0 && 959 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 960 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 961 !icsk->icsk_ack.pingpong)) && 962 !atomic_read(&sk->sk_rmem_alloc))) 963 time_to_ack = 1; 964 } 965 966 /* We send an ACK if we can now advertise a non-zero window 967 * which has been raised "significantly". 968 * 969 * Even if window raised up to infinity, do not send window open ACK 970 * in states, where we will not receive more. It is useless. 971 */ 972 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 973 __u32 rcv_window_now = tcp_receive_window(tp); 974 975 /* Optimize, __tcp_select_window() is not cheap. */ 976 if (2*rcv_window_now <= tp->window_clamp) { 977 __u32 new_window = __tcp_select_window(sk); 978 979 /* Send ACK now, if this read freed lots of space 980 * in our buffer. Certainly, new_window is new window. 981 * We can advertise it now, if it is not less than current one. 982 * "Lots" means "at least twice" here. 983 */ 984 if (new_window && new_window >= 2 * rcv_window_now) 985 time_to_ack = 1; 986 } 987 } 988 if (time_to_ack) 989 tcp_send_ack(sk); 990} 991 992static void tcp_prequeue_process(struct sock *sk) 993{ 994 struct sk_buff *skb; 995 struct tcp_sock *tp = tcp_sk(sk); 996 997 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); 998 999 /* RX process wants to run with disabled BHs, though it is not 1000 * necessary */ 1001 local_bh_disable(); 1002 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 1003 sk->sk_backlog_rcv(sk, skb); 1004 local_bh_enable(); 1005 1006 /* Clear memory counter. */ 1007 tp->ucopy.memory = 0; 1008} 1009 1010static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1011{ 1012 struct sk_buff *skb; 1013 u32 offset; 1014 1015 skb_queue_walk(&sk->sk_receive_queue, skb) { 1016 offset = seq - TCP_SKB_CB(skb)->seq; 1017 if (skb->h.th->syn) 1018 offset--; 1019 if (offset < skb->len || skb->h.th->fin) { 1020 *off = offset; 1021 return skb; 1022 } 1023 } 1024 return NULL; 1025} 1026 1027/* 1028 * This routine provides an alternative to tcp_recvmsg() for routines 1029 * that would like to handle copying from skbuffs directly in 'sendfile' 1030 * fashion. 1031 * Note: 1032 * - It is assumed that the socket was locked by the caller. 1033 * - The routine does not block. 1034 * - At present, there is no support for reading OOB data 1035 * or for 'peeking' the socket using this routine 1036 * (although both would be easy to implement). 
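 *
 * For illustration, a hypothetical recv_actor for tcp_read_sock() below
 * (the function and buffer here are invented, not kernel code).  The actor
 * returns the number of bytes it consumed; returning less than 'len' stops
 * the walk, desc->count limits the total, and the caller must hold the
 * socket lock as noted above:
 *
 *	static int example_recv_actor(read_descriptor_t *desc,
 *				      struct sk_buff *skb,
 *				      unsigned int offset, size_t len)
 *	{
 *		char *dst = desc->arg.data;
 *		size_t want = min(len, desc->count);
 *
 *		if (skb_copy_bits(skb, offset, dst + desc->written, want))
 *			return 0;
 *		desc->written += want;
 *		desc->count -= want;
 *		return want;
 *	}
 *
 *	static int example_read(struct sock *sk, char *buf, size_t size)
 *	{
 *		read_descriptor_t desc = { .arg.data = buf, .count = size };
 *
 *		return tcp_read_sock(sk, &desc, example_recv_actor);
 *	}
 *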
1037 */ 1038int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1039 sk_read_actor_t recv_actor) 1040{ 1041 struct sk_buff *skb; 1042 struct tcp_sock *tp = tcp_sk(sk); 1043 u32 seq = tp->copied_seq; 1044 u32 offset; 1045 int copied = 0; 1046 1047 if (sk->sk_state == TCP_LISTEN) 1048 return -ENOTCONN; 1049 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1050 if (offset < skb->len) { 1051 size_t used, len; 1052 1053 len = skb->len - offset; 1054 /* Stop reading if we hit a patch of urgent data */ 1055 if (tp->urg_data) { 1056 u32 urg_offset = tp->urg_seq - seq; 1057 if (urg_offset < len) 1058 len = urg_offset; 1059 if (!len) 1060 break; 1061 } 1062 used = recv_actor(desc, skb, offset, len); 1063 if (used <= len) { 1064 seq += used; 1065 copied += used; 1066 offset += used; 1067 } 1068 if (offset != skb->len) 1069 break; 1070 } 1071 if (skb->h.th->fin) { 1072 sk_eat_skb(sk, skb, 0); 1073 ++seq; 1074 break; 1075 } 1076 sk_eat_skb(sk, skb, 0); 1077 if (!desc->count) 1078 break; 1079 } 1080 tp->copied_seq = seq; 1081 1082 tcp_rcv_space_adjust(sk); 1083 1084 /* Clean up data we have read: This will do ACK frames. */ 1085 if (copied) 1086 tcp_cleanup_rbuf(sk, copied); 1087 return copied; 1088} 1089 1090/* 1091 * This routine copies from a sock struct into the user buffer. 1092 * 1093 * Technical note: in 2.3 we work on _locked_ socket, so that 1094 * tricks with *seq access order and skb->users are not required. 1095 * Probably, code can be easily improved even more. 1096 */ 1097 1098int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1099 size_t len, int nonblock, int flags, int *addr_len) 1100{ 1101 struct tcp_sock *tp = tcp_sk(sk); 1102 int copied = 0; 1103 u32 peek_seq; 1104 u32 *seq; 1105 unsigned long used; 1106 int err; 1107 int target; /* Read at least this many bytes */ 1108 long timeo; 1109 struct task_struct *user_recv = NULL; 1110 int copied_early = 0; 1111 1112 lock_sock(sk); 1113 1114 TCP_CHECK_TIMER(sk); 1115 1116 err = -ENOTCONN; 1117 if (sk->sk_state == TCP_LISTEN) 1118 goto out; 1119 1120 timeo = sock_rcvtimeo(sk, nonblock); 1121 1122 /* Urgent data needs to be handled specially. */ 1123 if (flags & MSG_OOB) 1124 goto recv_urg; 1125 1126 seq = &tp->copied_seq; 1127 if (flags & MSG_PEEK) { 1128 peek_seq = tp->copied_seq; 1129 seq = &peek_seq; 1130 } 1131 1132 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1133 1134#ifdef CONFIG_NET_DMA 1135 tp->ucopy.dma_chan = NULL; 1136 preempt_disable(); 1137 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1138 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { 1139 preempt_enable_no_resched(); 1140 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); 1141 } else 1142 preempt_enable_no_resched(); 1143#endif 1144 1145 do { 1146 struct sk_buff *skb; 1147 u32 offset; 1148 1149 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 1150 if (tp->urg_data && tp->urg_seq == *seq) { 1151 if (copied) 1152 break; 1153 if (signal_pending(current)) { 1154 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 1155 break; 1156 } 1157 } 1158 1159 /* Next get a buffer. */ 1160 1161 skb = skb_peek(&sk->sk_receive_queue); 1162 do { 1163 if (!skb) 1164 break; 1165 1166 /* Now that we have two receive queues this 1167 * shouldn't happen. 
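	 *
	 * For illustration: the 'target' computed above via sock_rcvlowat()
	 * is how much data this function tries to collect before it stops
	 * waiting; SO_RCVLOWAT raises that low-water mark and MSG_WAITALL
	 * raises it to the full buffer length, though a signal, an error or
	 * a FIN can still end the wait early, as the checks below show.
	 * A userspace sketch, not part of this file; error handling omitted:
	 *
	 *	#include <sys/socket.h>
	 *
	 *	static void set_low_water_mark(int fd, int bytes)
	 *	{
	 *		setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
	 *			   &bytes, sizeof(bytes));
	 *	}
	 *
	 *	static ssize_t recv_exact(int fd, void *buf, size_t len)
	 *	{
	 *		return recv(fd, buf, len, MSG_WAITALL);
	 *	}
	 *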
1168 */ 1169 if (before(*seq, TCP_SKB_CB(skb)->seq)) { 1170 printk(KERN_INFO "recvmsg bug: copied %X " 1171 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); 1172 break; 1173 } 1174 offset = *seq - TCP_SKB_CB(skb)->seq; 1175 if (skb->h.th->syn) 1176 offset--; 1177 if (offset < skb->len) 1178 goto found_ok_skb; 1179 if (skb->h.th->fin) 1180 goto found_fin_ok; 1181 BUG_TRAP(flags & MSG_PEEK); 1182 skb = skb->next; 1183 } while (skb != (struct sk_buff *)&sk->sk_receive_queue); 1184 1185 /* Well, if we have backlog, try to process it now yet. */ 1186 1187 if (copied >= target && !sk->sk_backlog.tail) 1188 break; 1189 1190 if (copied) { 1191 if (sk->sk_err || 1192 sk->sk_state == TCP_CLOSE || 1193 (sk->sk_shutdown & RCV_SHUTDOWN) || 1194 !timeo || 1195 signal_pending(current) || 1196 (flags & MSG_PEEK)) 1197 break; 1198 } else { 1199 if (sock_flag(sk, SOCK_DONE)) 1200 break; 1201 1202 if (sk->sk_err) { 1203 copied = sock_error(sk); 1204 break; 1205 } 1206 1207 if (sk->sk_shutdown & RCV_SHUTDOWN) 1208 break; 1209 1210 if (sk->sk_state == TCP_CLOSE) { 1211 if (!sock_flag(sk, SOCK_DONE)) { 1212 /* This occurs when user tries to read 1213 * from never connected socket. 1214 */ 1215 copied = -ENOTCONN; 1216 break; 1217 } 1218 break; 1219 } 1220 1221 if (!timeo) { 1222 copied = -EAGAIN; 1223 break; 1224 } 1225 1226 if (signal_pending(current)) { 1227 copied = sock_intr_errno(timeo); 1228 break; 1229 } 1230 } 1231 1232 tcp_cleanup_rbuf(sk, copied); 1233 1234 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { 1235 /* Install new reader */ 1236 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { 1237 user_recv = current; 1238 tp->ucopy.task = user_recv; 1239 tp->ucopy.iov = msg->msg_iov; 1240 } 1241 1242 tp->ucopy.len = len; 1243 1244 BUG_TRAP(tp->copied_seq == tp->rcv_nxt || 1245 (flags & (MSG_PEEK | MSG_TRUNC))); 1246 1247 /* Ugly... If prequeue is not empty, we have to 1248 * process it before releasing socket, otherwise 1249 * order will be broken at second iteration. 1250 * More elegant solution is required!!! 1251 * 1252 * Look: we have the following (pseudo)queues: 1253 * 1254 * 1. packets in flight 1255 * 2. backlog 1256 * 3. prequeue 1257 * 4. receive_queue 1258 * 1259 * Each queue can be processed only if the next ones 1260 * are empty. At this point we have empty receive_queue. 1261 * But prequeue _can_ be not empty after 2nd iteration, 1262 * when we jumped to start of loop because backlog 1263 * processing added something to receive_queue. 1264 * We cannot release_sock(), because backlog contains 1265 * packets arrived _after_ prequeued ones. 1266 * 1267 * Shortly, algorithm is clear --- to process all 1268 * the queues in order. We could make it more directly, 1269 * requeueing packets from backlog to prequeue, if 1270 * is not empty. It is more elegant, but eats cycles, 1271 * unfortunately. 1272 */ 1273 if (!skb_queue_empty(&tp->ucopy.prequeue)) 1274 goto do_prequeue; 1275 1276 /* __ Set realtime policy in scheduler __ */ 1277 } 1278 1279 if (copied >= target) { 1280 /* Do not sleep, just process backlog. 
*/ 1281 release_sock(sk); 1282 lock_sock(sk); 1283 } else 1284 sk_wait_data(sk, &timeo); 1285 1286#ifdef CONFIG_NET_DMA 1287 tp->ucopy.wakeup = 0; 1288#endif 1289 1290 if (user_recv) { 1291 int chunk; 1292 1293 /* __ Restore normal policy in scheduler __ */ 1294 1295 if ((chunk = len - tp->ucopy.len) != 0) { 1296 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); 1297 len -= chunk; 1298 copied += chunk; 1299 } 1300 1301 if (tp->rcv_nxt == tp->copied_seq && 1302 !skb_queue_empty(&tp->ucopy.prequeue)) { 1303do_prequeue: 1304 tcp_prequeue_process(sk); 1305 1306 if ((chunk = len - tp->ucopy.len) != 0) { 1307 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1308 len -= chunk; 1309 copied += chunk; 1310 } 1311 } 1312 } 1313 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { 1314 if (net_ratelimit()) 1315 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", 1316 current->comm, current->pid); 1317 peek_seq = tp->copied_seq; 1318 } 1319 continue; 1320 1321 found_ok_skb: 1322 /* Ok so how much can we use? */ 1323 used = skb->len - offset; 1324 if (len < used) 1325 used = len; 1326 1327 /* Do we have urgent data here? */ 1328 if (tp->urg_data) { 1329 u32 urg_offset = tp->urg_seq - *seq; 1330 if (urg_offset < used) { 1331 if (!urg_offset) { 1332 if (!sock_flag(sk, SOCK_URGINLINE)) { 1333 ++*seq; 1334 offset++; 1335 used--; 1336 if (!used) 1337 goto skip_copy; 1338 } 1339 } else 1340 used = urg_offset; 1341 } 1342 } 1343 1344 if (!(flags & MSG_TRUNC)) { 1345#ifdef CONFIG_NET_DMA 1346 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1347 tp->ucopy.dma_chan = get_softnet_dma(); 1348 1349 if (tp->ucopy.dma_chan) { 1350 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1351 tp->ucopy.dma_chan, skb, offset, 1352 msg->msg_iov, used, 1353 tp->ucopy.pinned_list); 1354 1355 if (tp->ucopy.dma_cookie < 0) { 1356 1357 printk(KERN_ALERT "dma_cookie < 0\n"); 1358 1359 /* Exception. Bailout! */ 1360 if (!copied) 1361 copied = -EFAULT; 1362 break; 1363 } 1364 if ((offset + used) == skb->len) 1365 copied_early = 1; 1366 1367 } else 1368#endif 1369 { 1370 err = skb_copy_datagram_iovec(skb, offset, 1371 msg->msg_iov, used); 1372 if (err) { 1373 /* Exception. Bailout! */ 1374 if (!copied) 1375 copied = -EFAULT; 1376 break; 1377 } 1378 } 1379 } 1380 1381 *seq += used; 1382 copied += used; 1383 len -= used; 1384 1385 tcp_rcv_space_adjust(sk); 1386 1387skip_copy: 1388 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { 1389 tp->urg_data = 0; 1390 tcp_fast_path_check(sk, tp); 1391 } 1392 if (used + offset < skb->len) 1393 continue; 1394 1395 if (skb->h.th->fin) 1396 goto found_fin_ok; 1397 if (!(flags & MSG_PEEK)) { 1398 sk_eat_skb(sk, skb, copied_early); 1399 copied_early = 0; 1400 } 1401 continue; 1402 1403 found_fin_ok: 1404 /* Process the FIN. */ 1405 ++*seq; 1406 if (!(flags & MSG_PEEK)) { 1407 sk_eat_skb(sk, skb, copied_early); 1408 copied_early = 0; 1409 } 1410 break; 1411 } while (len > 0); 1412 1413 if (user_recv) { 1414 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1415 int chunk; 1416 1417 tp->ucopy.len = copied > 0 ? 
len : 0; 1418 1419 tcp_prequeue_process(sk); 1420 1421 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { 1422 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1423 len -= chunk; 1424 copied += chunk; 1425 } 1426 } 1427 1428 tp->ucopy.task = NULL; 1429 tp->ucopy.len = 0; 1430 } 1431 1432#ifdef CONFIG_NET_DMA 1433 if (tp->ucopy.dma_chan) { 1434 struct sk_buff *skb; 1435 dma_cookie_t done, used; 1436 1437 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); 1438 1439 while (dma_async_memcpy_complete(tp->ucopy.dma_chan, 1440 tp->ucopy.dma_cookie, &done, 1441 &used) == DMA_IN_PROGRESS) { 1442 /* do partial cleanup of sk_async_wait_queue */ 1443 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1444 (dma_async_is_complete(skb->dma_cookie, done, 1445 used) == DMA_SUCCESS)) { 1446 __skb_dequeue(&sk->sk_async_wait_queue); 1447 kfree_skb(skb); 1448 } 1449 } 1450 1451 /* Safe to free early-copied skbs now */ 1452 __skb_queue_purge(&sk->sk_async_wait_queue); 1453 dma_chan_put(tp->ucopy.dma_chan); 1454 tp->ucopy.dma_chan = NULL; 1455 } 1456 if (tp->ucopy.pinned_list) { 1457 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1458 tp->ucopy.pinned_list = NULL; 1459 } 1460#endif 1461 1462 /* According to UNIX98, msg_name/msg_namelen are ignored 1463 * on connected socket. I was just happy when found this 8) --ANK 1464 */ 1465 1466 /* Clean up data we have read: This will do ACK frames. */ 1467 tcp_cleanup_rbuf(sk, copied); 1468 1469 TCP_CHECK_TIMER(sk); 1470 release_sock(sk); 1471 return copied; 1472 1473out: 1474 TCP_CHECK_TIMER(sk); 1475 release_sock(sk); 1476 return err; 1477 1478recv_urg: 1479 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len); 1480 goto out; 1481} 1482 1483/* 1484 * State processing on a close. This implements the state shift for 1485 * sending our FIN frame. Note that we only send a FIN for some 1486 * states. A shutdown() may have already sent the FIN, or we may be 1487 * closed. 1488 */ 1489 1490static const unsigned char new_state[16] = { 1491 /* current state: new state: action: */ 1492 /* (Invalid) */ TCP_CLOSE, 1493 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 1494 /* TCP_SYN_SENT */ TCP_CLOSE, 1495 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 1496 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, 1497 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, 1498 /* TCP_TIME_WAIT */ TCP_CLOSE, 1499 /* TCP_CLOSE */ TCP_CLOSE, 1500 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, 1501 /* TCP_LAST_ACK */ TCP_LAST_ACK, 1502 /* TCP_LISTEN */ TCP_CLOSE, 1503 /* TCP_CLOSING */ TCP_CLOSING, 1504}; 1505 1506static int tcp_close_state(struct sock *sk) 1507{ 1508 int next = (int)new_state[sk->sk_state]; 1509 int ns = next & TCP_STATE_MASK; 1510 1511 tcp_set_state(sk, ns); 1512 1513 return next & TCP_ACTION_FIN; 1514} 1515 1516/* 1517 * Shutdown the sending side of a connection. Much like close except 1518 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD). 1519 */ 1520 1521void tcp_shutdown(struct sock *sk, int how) 1522{ 1523 /* We need to grab some memory, and put together a FIN, 1524 * and then put it into the queue to be sent. 1525 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 1526 */ 1527 if (!(how & SEND_SHUTDOWN)) 1528 return; 1529 1530 /* If we've already sent a FIN, or it's a closed state, skip this. */ 1531 if ((1 << sk->sk_state) & 1532 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 1533 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 1534 /* Clear out any half completed packets. FIN if needed. 
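	 *
	 * For illustration: shutdown(fd, SHUT_WR) from userspace reaches
	 * this function with SEND_SHUTDOWN set, sending our FIN (via the
	 * tcp_close_state()/tcp_send_fin() calls below) while the receive
	 * side stays open.  A common half-close pattern, as a sketch (not
	 * part of this file; error handling omitted):
	 *
	 *	#include <sys/socket.h>
	 *	#include <unistd.h>
	 *
	 *	static void send_request_then_drain(int fd, const void *req,
	 *					    size_t len)
	 *	{
	 *		char buf[4096];
	 *
	 *		send(fd, req, len, 0);
	 *		shutdown(fd, SHUT_WR);
	 *		while (read(fd, buf, sizeof(buf)) > 0)
	 *			;
	 *		close(fd);
	 *	}
	 *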
*/ 1535 if (tcp_close_state(sk)) 1536 tcp_send_fin(sk); 1537 } 1538} 1539 1540void tcp_close(struct sock *sk, long timeout) 1541{ 1542 struct sk_buff *skb; 1543 int data_was_unread = 0; 1544 int state; 1545 1546 lock_sock(sk); 1547 sk->sk_shutdown = SHUTDOWN_MASK; 1548 1549 if (sk->sk_state == TCP_LISTEN) { 1550 tcp_set_state(sk, TCP_CLOSE); 1551 1552 /* Special case. */ 1553 inet_csk_listen_stop(sk); 1554 1555 goto adjudge_to_death; 1556 } 1557 1558 /* We need to flush the recv. buffs. We do this only on the 1559 * descriptor close, not protocol-sourced closes, because the 1560 * reader process may not have drained the data yet! 1561 */ 1562 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 1563 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - 1564 skb->h.th->fin; 1565 data_was_unread += len; 1566 __kfree_skb(skb); 1567 } 1568 1569 sk_stream_mem_reclaim(sk); 1570 1571 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section 1572 * 3.10, we send a RST here because data was lost. To 1573 * witness the awful effects of the old behavior of always 1574 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start 1575 * a bulk GET in an FTP client, suspend the process, wait 1576 * for the client to advertise a zero window, then kill -9 1577 * the FTP client, wheee... Note: timeout is always zero 1578 * in such a case. 1579 */ 1580 if (data_was_unread) { 1581 /* Unread data was tossed, zap the connection. */ 1582 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE); 1583 tcp_set_state(sk, TCP_CLOSE); 1584 tcp_send_active_reset(sk, GFP_KERNEL); 1585 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 1586 /* Check zero linger _after_ checking for unread data. */ 1587 sk->sk_prot->disconnect(sk, 0); 1588 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA); 1589 } else if (tcp_close_state(sk)) { 1590 /* We FIN if the application ate all the data before 1591 * zapping the connection. 1592 */ 1593 1594 /* RED-PEN. Formally speaking, we have broken TCP state 1595 * machine. State transitions: 1596 * 1597 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 1598 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 1599 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 1600 * 1601 * are legal only when FIN has been sent (i.e. in window), 1602 * rather than queued out of window. Purists blame. 1603 * 1604 * F.e. "RFC state" is ESTABLISHED, 1605 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 1606 * 1607 * The visible declinations are that sometimes 1608 * we enter time-wait state, when it is not required really 1609 * (harmless), do not send active resets, when they are 1610 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 1611 * they look as CLOSING or LAST_ACK for Linux) 1612 * Probably, I missed some more holelets. 1613 * --ANK 1614 */ 1615 tcp_send_fin(sk); 1616 } 1617 1618 sk_stream_wait_close(sk, timeout); 1619 1620adjudge_to_death: 1621 state = sk->sk_state; 1622 sock_hold(sk); 1623 sock_orphan(sk); 1624 atomic_inc(sk->sk_prot->orphan_count); 1625 1626 /* It is the last release_sock in its life. It will remove backlog. */ 1627 release_sock(sk); 1628 1629 1630 /* Now socket is owned by kernel and we acquire BH lock 1631 to finish close. No need to check for user refs. 1632 */ 1633 local_bh_disable(); 1634 bh_lock_sock(sk); 1635 BUG_TRAP(!sock_owned_by_user(sk)); 1636 1637 /* Have we already been destroyed by a softirq or backlog? */ 1638 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 1639 goto out; 1640 1641 /* This is a (useful) BSD violating of the RFC. 
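	 *
	 * For illustration: as implemented earlier in tcp_close() (the
	 * unread-data and zero-linger checks above), a close() on a socket
	 * with SO_LINGER enabled and a linger time of zero aborts the
	 * connection with a RST instead of the normal FIN exchange (the
	 * RFC 793 ABORT call).  A userspace sketch, not part of this file;
	 * error handling omitted:
	 *
	 *	#include <sys/socket.h>
	 *	#include <unistd.h>
	 *
	 *	static void abort_connection(int fd)
	 *	{
	 *		struct linger lg = { .l_onoff = 1, .l_linger = 0 };
	 *
	 *		setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	 *		close(fd);
	 *	}
	 *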
There is a 1642 * problem with TCP as specified in that the other end could 1643 * keep a socket open forever with no application left this end. 1644 * We use a 3 minute timeout (about the same as BSD) then kill 1645 * our end. If they send after that then tough - BUT: long enough 1646 * that we won't make the old 4*rto = almost no time - whoops 1647 * reset mistake. 1648 * 1649 * Nope, it was not mistake. It is really desired behaviour 1650 * f.e. on http servers, when such sockets are useless, but 1651 * consume significant resources. Let's do it with special 1652 * linger2 option. --ANK 1653 */ 1654 1655 if (sk->sk_state == TCP_FIN_WAIT2) { 1656 struct tcp_sock *tp = tcp_sk(sk); 1657 if (tp->linger2 < 0) { 1658 tcp_set_state(sk, TCP_CLOSE); 1659 tcp_send_active_reset(sk, GFP_ATOMIC); 1660 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); 1661 } else { 1662 const int tmo = tcp_fin_time(sk); 1663 1664 if (tmo > TCP_TIMEWAIT_LEN) { 1665 inet_csk_reset_keepalive_timer(sk, 1666 tmo - TCP_TIMEWAIT_LEN); 1667 } else { 1668 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1669 goto out; 1670 } 1671 } 1672 } 1673 if (sk->sk_state != TCP_CLOSE) { 1674 sk_stream_mem_reclaim(sk); 1675 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans || 1676 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 1677 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { 1678 if (net_ratelimit()) 1679 printk(KERN_INFO "TCP: too many of orphaned " 1680 "sockets\n"); 1681 tcp_set_state(sk, TCP_CLOSE); 1682 tcp_send_active_reset(sk, GFP_ATOMIC); 1683 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 1684 } 1685 } 1686 1687 if (sk->sk_state == TCP_CLOSE) 1688 inet_csk_destroy_sock(sk); 1689 /* Otherwise, socket is reprieved until protocol close. */ 1690 1691out: 1692 bh_unlock_sock(sk); 1693 local_bh_enable(); 1694 sock_put(sk); 1695} 1696 1697/* These states need RST on ABORT according to RFC793 */ 1698 1699static inline int tcp_need_reset(int state) 1700{ 1701 return (1 << state) & 1702 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 1703 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 1704} 1705 1706int tcp_disconnect(struct sock *sk, int flags) 1707{ 1708 struct inet_sock *inet = inet_sk(sk); 1709 struct inet_connection_sock *icsk = inet_csk(sk); 1710 struct tcp_sock *tp = tcp_sk(sk); 1711 int err = 0; 1712 int old_state = sk->sk_state; 1713 1714 if (old_state != TCP_CLOSE) 1715 tcp_set_state(sk, TCP_CLOSE); 1716 1717 /* ABORT function of RFC793 */ 1718 if (old_state == TCP_LISTEN) { 1719 inet_csk_listen_stop(sk); 1720 } else if (tcp_need_reset(old_state) || 1721 (tp->snd_nxt != tp->write_seq && 1722 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 1723 /* The last check adjusts for discrepancy of Linux wrt. 
RFC 1724 * states 1725 */ 1726 tcp_send_active_reset(sk, gfp_any()); 1727 sk->sk_err = ECONNRESET; 1728 } else if (old_state == TCP_SYN_SENT) 1729 sk->sk_err = ECONNRESET; 1730 1731 tcp_clear_xmit_timers(sk); 1732 __skb_queue_purge(&sk->sk_receive_queue); 1733 sk_stream_writequeue_purge(sk); 1734 __skb_queue_purge(&tp->out_of_order_queue); 1735#ifdef CONFIG_NET_DMA 1736 __skb_queue_purge(&sk->sk_async_wait_queue); 1737#endif 1738 1739 inet->dport = 0; 1740 1741 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1742 inet_reset_saddr(sk); 1743 1744 sk->sk_shutdown = 0; 1745 sock_reset_flag(sk, SOCK_DONE); 1746 tp->srtt = 0; 1747 if ((tp->write_seq += tp->max_window + 2) == 0) 1748 tp->write_seq = 1; 1749 icsk->icsk_backoff = 0; 1750 tp->snd_cwnd = 2; 1751 icsk->icsk_probes_out = 0; 1752 tp->packets_out = 0; 1753 tp->snd_ssthresh = 0x7fffffff; 1754 tp->snd_cwnd_cnt = 0; 1755 tp->bytes_acked = 0; 1756 tcp_set_ca_state(sk, TCP_CA_Open); 1757 tcp_clear_retrans(tp); 1758 inet_csk_delack_init(sk); 1759 sk->sk_send_head = NULL; 1760 tp->rx_opt.saw_tstamp = 0; 1761 tcp_sack_reset(&tp->rx_opt); 1762 __sk_dst_reset(sk); 1763 1764 BUG_TRAP(!inet->num || icsk->icsk_bind_hash); 1765 1766 sk->sk_error_report(sk); 1767 return err; 1768} 1769 1770/* 1771 * Socket option code for TCP. 1772 */ 1773static int do_tcp_setsockopt(struct sock *sk, int level, 1774 int optname, char __user *optval, int optlen) 1775{ 1776 struct tcp_sock *tp = tcp_sk(sk); 1777 struct inet_connection_sock *icsk = inet_csk(sk); 1778 int val; 1779 int err = 0; 1780 1781 /* This is a string value all the others are int's */ 1782 if (optname == TCP_CONGESTION) { 1783 char name[TCP_CA_NAME_MAX]; 1784 1785 if (optlen < 1) 1786 return -EINVAL; 1787 1788 val = strncpy_from_user(name, optval, 1789 min(TCP_CA_NAME_MAX-1, optlen)); 1790 if (val < 0) 1791 return -EFAULT; 1792 name[val] = 0; 1793 1794 lock_sock(sk); 1795 err = tcp_set_congestion_control(sk, name); 1796 release_sock(sk); 1797 return err; 1798 } 1799 1800 if (optlen < sizeof(int)) 1801 return -EINVAL; 1802 1803 if (get_user(val, (int __user *)optval)) 1804 return -EFAULT; 1805 1806 lock_sock(sk); 1807 1808 switch (optname) { 1809 case TCP_MAXSEG: 1810 /* Values greater than interface MTU won't take effect. However 1811 * at the point when this call is done we typically don't yet 1812 * know which interface is going to be used */ 1813 if (val < 8 || val > MAX_TCP_WINDOW) { 1814 err = -EINVAL; 1815 break; 1816 } 1817 tp->rx_opt.user_mss = val; 1818 break; 1819 1820 case TCP_NODELAY: 1821 if (val) { 1822 /* TCP_NODELAY is weaker than TCP_CORK, so that 1823 * this option on corked socket is remembered, but 1824 * it is not activated until cork is cleared. 1825 * 1826 * However, when TCP_NODELAY is set we make 1827 * an explicit push, which overrides even TCP_CORK 1828 * for currently queued segments. 1829 */ 1830 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 1831 tcp_push_pending_frames(sk, tp); 1832 } else { 1833 tp->nonagle &= ~TCP_NAGLE_OFF; 1834 } 1835 break; 1836 1837 case TCP_CORK: 1838 /* When set indicates to always queue non-full frames. 1839 * Later the user clears this option and we transmit 1840 * any pending partial frames in the queue. This is 1841 * meant to be used alongside sendfile() to get properly 1842 * filled frames when the user (for example) must write 1843 * out headers with a write() call first and then use 1844 * sendfile to send out the data parts. 1845 * 1846 * TCP_CORK can be set together with TCP_NODELAY and it is 1847 * stronger than TCP_NODELAY. 
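	 *
	 * For illustration, the cork/uncork pattern this comment describes,
	 * as a userspace sketch (not part of this file; error handling
	 * omitted).  Clearing TCP_CORK pushes out any pending partial
	 * frame, which is what the tcp_push_pending_frames() call below
	 * does:
	 *
	 *	#include <netinet/in.h>
	 *	#include <netinet/tcp.h>
	 *	#include <string.h>
	 *	#include <sys/sendfile.h>
	 *	#include <sys/socket.h>
	 *
	 *	static void send_hdr_then_file(int sock_fd, const char *hdr,
	 *				       int file_fd, size_t file_len)
	 *	{
	 *		int on = 1, off = 0;
	 *		off_t pos = 0;
	 *
	 *		setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK,
	 *			   &on, sizeof(on));
	 *		send(sock_fd, hdr, strlen(hdr), 0);
	 *		sendfile(sock_fd, file_fd, &pos, file_len);
	 *		setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK,
	 *			   &off, sizeof(off));
	 *	}
	 *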
1848 */ 1849 if (val) { 1850 tp->nonagle |= TCP_NAGLE_CORK; 1851 } else { 1852 tp->nonagle &= ~TCP_NAGLE_CORK; 1853 if (tp->nonagle&TCP_NAGLE_OFF) 1854 tp->nonagle |= TCP_NAGLE_PUSH; 1855 tcp_push_pending_frames(sk, tp); 1856 } 1857 break; 1858 1859 case TCP_KEEPIDLE: 1860 if (val < 1 || val > MAX_TCP_KEEPIDLE) 1861 err = -EINVAL; 1862 else { 1863 tp->keepalive_time = val * HZ; 1864 if (sock_flag(sk, SOCK_KEEPOPEN) && 1865 !((1 << sk->sk_state) & 1866 (TCPF_CLOSE | TCPF_LISTEN))) { 1867 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; 1868 if (tp->keepalive_time > elapsed) 1869 elapsed = tp->keepalive_time - elapsed; 1870 else 1871 elapsed = 0; 1872 inet_csk_reset_keepalive_timer(sk, elapsed); 1873 } 1874 } 1875 break; 1876 case TCP_KEEPINTVL: 1877 if (val < 1 || val > MAX_TCP_KEEPINTVL) 1878 err = -EINVAL; 1879 else 1880 tp->keepalive_intvl = val * HZ; 1881 break; 1882 case TCP_KEEPCNT: 1883 if (val < 1 || val > MAX_TCP_KEEPCNT) 1884 err = -EINVAL; 1885 else 1886 tp->keepalive_probes = val; 1887 break; 1888 case TCP_SYNCNT: 1889 if (val < 1 || val > MAX_TCP_SYNCNT) 1890 err = -EINVAL; 1891 else 1892 icsk->icsk_syn_retries = val; 1893 break; 1894 1895 case TCP_LINGER2: 1896 if (val < 0) 1897 tp->linger2 = -1; 1898 else if (val > sysctl_tcp_fin_timeout / HZ) 1899 tp->linger2 = 0; 1900 else 1901 tp->linger2 = val * HZ; 1902 break; 1903 1904 case TCP_DEFER_ACCEPT: 1905 icsk->icsk_accept_queue.rskq_defer_accept = 0; 1906 if (val > 0) { 1907 /* Translate value in seconds to number of 1908 * retransmits */ 1909 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && 1910 val > ((TCP_TIMEOUT_INIT / HZ) << 1911 icsk->icsk_accept_queue.rskq_defer_accept)) 1912 icsk->icsk_accept_queue.rskq_defer_accept++; 1913 icsk->icsk_accept_queue.rskq_defer_accept++; 1914 } 1915 break; 1916 1917 case TCP_WINDOW_CLAMP: 1918 if (!val) { 1919 if (sk->sk_state != TCP_CLOSE) { 1920 err = -EINVAL; 1921 break; 1922 } 1923 tp->window_clamp = 0; 1924 } else 1925 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 1926 SOCK_MIN_RCVBUF / 2 : val; 1927 break; 1928 1929 case TCP_QUICKACK: 1930 if (!val) { 1931 icsk->icsk_ack.pingpong = 1; 1932 } else { 1933 icsk->icsk_ack.pingpong = 0; 1934 if ((1 << sk->sk_state) & 1935 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 1936 inet_csk_ack_scheduled(sk)) { 1937 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 1938 tcp_cleanup_rbuf(sk, 1); 1939 if (!(val & 1)) 1940 icsk->icsk_ack.pingpong = 1; 1941 } 1942 } 1943 break; 1944 1945 default: 1946 err = -ENOPROTOOPT; 1947 break; 1948 }; 1949 release_sock(sk); 1950 return err; 1951} 1952 1953int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 1954 int optlen) 1955{ 1956 struct inet_connection_sock *icsk = inet_csk(sk); 1957 1958 if (level != SOL_TCP) 1959 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 1960 optval, optlen); 1961 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 1962} 1963 1964#ifdef CONFIG_COMPAT 1965int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 1966 char __user *optval, int optlen) 1967{ 1968 if (level != SOL_TCP) 1969 return inet_csk_compat_setsockopt(sk, level, optname, 1970 optval, optlen); 1971 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 1972} 1973 1974EXPORT_SYMBOL(compat_tcp_setsockopt); 1975#endif 1976 1977/* Return information about state of tcp endpoint in API format. 
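 *
 * For illustration: tcp_get_info() below fills struct tcp_info, which
 * userspace retrieves with getsockopt(TCP_INFO) (handled in
 * do_tcp_getsockopt() further down).  A sketch, not part of this file;
 * error handling omitted:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	static void dump_tcp_info(int fd)
 *	{
 *		struct tcp_info ti;
 *		socklen_t len = sizeof(ti);
 *
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *			printf("state %u rtt %uus cwnd %u retrans %u\n",
 *			       ti.tcpi_state, ti.tcpi_rtt,
 *			       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 *	}
 *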
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);

static int do_tcp_getsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	unsigned int delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = skb->h.th;
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
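		/* A GSO-robust device can take the super-packet as-is, so
		 * no software segmentation is done on this path; the skb is
		 * handed back unsegmented (segs = NULL) and only gso_segs is
		 * recomputed from skb->len and gso_size, since a DODGY
		 * (untrusted, e.g. virtualised) sender may have supplied a
		 * bogus value.
		 */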
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = skb->h.th;
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold(th->check + delta);
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
							   skb->csum));

		seq += len;
		skb = skb->next;
		th = skb->h.th;

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
	th->check = ~csum_fold(th->check + delta);
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
						   skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
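	/* The loop below finds the smallest page order whose size covers
	 * the bind hash table; that order is then used as a rough measure
	 * of system memory when scaling the local port range, orphan
	 * limit and SYN backlog defaults that follow.
	 */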
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
	     (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
	     order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Allow no more than 3/4 kernel memory (usually less) allocated to TCP */
	sysctl_tcp_mem[0] = (1536 / sizeof (struct inet_bind_hashbucket)) << order;
	sysctl_tcp_mem[1] = sysctl_tcp_mem[0] * 4 / 3;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);