tcp_output.c revision 750ea2bafa55aaed208b2583470ecd7122225634
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 */ 20 21/* 22 * Changes: Pedro Roque : Retransmit queue handled by TCP. 23 * : Fragmentation on mtu decrease 24 * : Segment collapse on retransmit 25 * : AF independence 26 * 27 * Linus Torvalds : send_delayed_ack 28 * David S. Miller : Charge memory using the right skb 29 * during syn/ack processing. 30 * David S. Miller : Output engine completely rewritten. 31 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. 32 * Cacophonix Gaul : draft-minshall-nagle-01 33 * J Hadi Salim : ECN support 34 * 35 */ 36 37#include <net/tcp.h> 38 39#include <linux/compiler.h> 40#include <linux/gfp.h> 41#include <linux/module.h> 42 43/* People can turn this off for buggy TCP's found in printers etc. */ 44int sysctl_tcp_retrans_collapse __read_mostly = 1; 45 46/* People can turn this on to work with those rare, broken TCPs that 47 * interpret the window field as a signed quantity. 48 */ 49int sysctl_tcp_workaround_signed_windows __read_mostly = 0; 50 51/* This limits the percentage of the congestion window which we 52 * will allow a single TSO frame to consume. Building TSO frames 53 * which are too large can cause TCP streams to be bursty. 54 */ 55int sysctl_tcp_tso_win_divisor __read_mostly = 3; 56 57int sysctl_tcp_mtu_probing __read_mostly = 0; 58int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS; 59 60/* By default, RFC2861 behavior. */ 61int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 62 63int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */ 64EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size); 65 66 67/* Account for new data that has been sent to the network. */ 68static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) 69{ 70 struct tcp_sock *tp = tcp_sk(sk); 71 unsigned int prior_packets = tp->packets_out; 72 73 tcp_advance_send_head(sk, skb); 74 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 75 76 /* Don't override Nagle indefinitely with F-RTO */ 77 if (tp->frto_counter == 2) 78 tp->frto_counter = 3; 79 80 tp->packets_out += tcp_skb_pcount(skb); 81 if (!prior_packets || tp->early_retrans_delayed) 82 tcp_rearm_rto(sk); 83} 84 85/* SND.NXT, if window was not shrunk. 86 * If window has been shrunk, what should we make? It is not clear at all. 87 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( 88 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 89 * invalid. OK, let's make this for now: 90 */ 91static inline __u32 tcp_acceptable_seq(const struct sock *sk) 92{ 93 const struct tcp_sock *tp = tcp_sk(sk); 94 95 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 96 return tp->snd_nxt; 97 else 98 return tcp_wnd_end(tp); 99} 100 101/* Calculate mss to advertise in SYN segment. 102 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: 103 * 104 * 1. It is independent of path mtu. 
105 * 2. Ideally, it is maximal possible segment size i.e. 65535-40. 106 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of 107 * attached devices, because some buggy hosts are confused by 108 * large MSS. 109 * 4. We do not make 3, we advertise MSS, calculated from first 110 * hop device mtu, but allow to raise it to ip_rt_min_advmss. 111 * This may be overridden via information stored in routing table. 112 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, 113 * probably even Jumbo". 114 */ 115static __u16 tcp_advertise_mss(struct sock *sk) 116{ 117 struct tcp_sock *tp = tcp_sk(sk); 118 const struct dst_entry *dst = __sk_dst_get(sk); 119 int mss = tp->advmss; 120 121 if (dst) { 122 unsigned int metric = dst_metric_advmss(dst); 123 124 if (metric < mss) { 125 mss = metric; 126 tp->advmss = mss; 127 } 128 } 129 130 return (__u16)mss; 131} 132 133/* RFC2861. Reset CWND after idle period longer RTO to "restart window". 134 * This is the first part of cwnd validation mechanism. */ 135static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) 136{ 137 struct tcp_sock *tp = tcp_sk(sk); 138 s32 delta = tcp_time_stamp - tp->lsndtime; 139 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 140 u32 cwnd = tp->snd_cwnd; 141 142 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 143 144 tp->snd_ssthresh = tcp_current_ssthresh(sk); 145 restart_cwnd = min(restart_cwnd, cwnd); 146 147 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 148 cwnd >>= 1; 149 tp->snd_cwnd = max(cwnd, restart_cwnd); 150 tp->snd_cwnd_stamp = tcp_time_stamp; 151 tp->snd_cwnd_used = 0; 152} 153 154/* Congestion state accounting after a packet has been sent. */ 155static void tcp_event_data_sent(struct tcp_sock *tp, 156 struct sock *sk) 157{ 158 struct inet_connection_sock *icsk = inet_csk(sk); 159 const u32 now = tcp_time_stamp; 160 161 if (sysctl_tcp_slow_start_after_idle && 162 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) 163 tcp_cwnd_restart(sk, __sk_dst_get(sk)); 164 165 tp->lsndtime = now; 166 167 /* If it is a reply for ato after last received 168 * packet, enter pingpong mode. 169 */ 170 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 171 icsk->icsk_ack.pingpong = 1; 172} 173 174/* Account for an ACK we sent. */ 175static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 176{ 177 tcp_dec_quickack_mode(sk, pkts); 178 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 179} 180 181/* Determine a window scaling and initial window to offer. 182 * Based on the assumption that the given amount of space 183 * will be offered. Store the results in the tp structure. 184 * NOTE: for smooth operation initial space offering should 185 * be a multiple of mss if possible. We assume here that mss >= 1. 186 * This MUST be enforced by all callers. 187 */ 188void tcp_select_initial_window(int __space, __u32 mss, 189 __u32 *rcv_wnd, __u32 *window_clamp, 190 int wscale_ok, __u8 *rcv_wscale, 191 __u32 init_rcv_wnd) 192{ 193 unsigned int space = (__space < 0 ? 0 : __space); 194 195 /* If no clamp set the clamp to the max possible scaled window */ 196 if (*window_clamp == 0) 197 (*window_clamp) = (65535 << 14); 198 space = min(*window_clamp, space); 199 200 /* Quantize space offering to a multiple of mss if possible. */ 201 if (space > mss) 202 space = (space / mss) * mss; 203 204 /* NOTE: offering an initial window larger than 32767 205 * will break some buggy TCP stacks. 
If the admin tells us 206 * it is likely we could be speaking with such a buggy stack 207 * we will truncate our initial window offering to 32K-1 208 * unless the remote has sent us a window scaling option, 209 * which we interpret as a sign the remote TCP is not 210 * misinterpreting the window field as a signed quantity. 211 */ 212 if (sysctl_tcp_workaround_signed_windows) 213 (*rcv_wnd) = min(space, MAX_TCP_WINDOW); 214 else 215 (*rcv_wnd) = space; 216 217 (*rcv_wscale) = 0; 218 if (wscale_ok) { 219 /* Set window scaling on max possible window 220 * See RFC1323 for an explanation of the limit to 14 221 */ 222 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 223 space = min_t(u32, space, *window_clamp); 224 while (space > 65535 && (*rcv_wscale) < 14) { 225 space >>= 1; 226 (*rcv_wscale)++; 227 } 228 } 229 230 /* Set initial window to a value enough for senders starting with 231 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place 232 * a limit on the initial window when mss is larger than 1460. 233 */ 234 if (mss > (1 << *rcv_wscale)) { 235 int init_cwnd = TCP_DEFAULT_INIT_RCVWND; 236 if (mss > 1460) 237 init_cwnd = 238 max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); 239 /* when initializing use the value from init_rcv_wnd 240 * rather than the default from above 241 */ 242 if (init_rcv_wnd) 243 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); 244 else 245 *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); 246 } 247 248 /* Set the clamp no higher than max representable value */ 249 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); 250} 251EXPORT_SYMBOL(tcp_select_initial_window); 252 253/* Chose a new window to advertise, update state in tcp_sock for the 254 * socket, and return result with RFC1323 scaling applied. The return 255 * value can be stuffed directly into th->window for an outgoing 256 * frame. 257 */ 258static u16 tcp_select_window(struct sock *sk) 259{ 260 struct tcp_sock *tp = tcp_sk(sk); 261 u32 cur_win = tcp_receive_window(tp); 262 u32 new_win = __tcp_select_window(sk); 263 264 /* Never shrink the offered window */ 265 if (new_win < cur_win) { 266 /* Danger Will Robinson! 267 * Don't update rcv_wup/rcv_wnd here or else 268 * we will not be able to advertise a zero 269 * window in time. --DaveM 270 * 271 * Relax Will Robinson. 272 */ 273 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); 274 } 275 tp->rcv_wnd = new_win; 276 tp->rcv_wup = tp->rcv_nxt; 277 278 /* Make sure we do not exceed the maximum possible 279 * scaled window. 280 */ 281 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) 282 new_win = min(new_win, MAX_TCP_WINDOW); 283 else 284 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); 285 286 /* RFC1323 scaling applied */ 287 new_win >>= tp->rx_opt.rcv_wscale; 288 289 /* If we advertise zero window, disable fast path. */ 290 if (new_win == 0) 291 tp->pred_flags = 0; 292 293 return new_win; 294} 295 296/* Packet ECN state for a SYN-ACK */ 297static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) 298{ 299 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; 300 if (!(tp->ecn_flags & TCP_ECN_OK)) 301 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; 302} 303 304/* Packet ECN state for a SYN. 
*/ 305static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) 306{ 307 struct tcp_sock *tp = tcp_sk(sk); 308 309 tp->ecn_flags = 0; 310 if (sysctl_tcp_ecn == 1) { 311 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; 312 tp->ecn_flags = TCP_ECN_OK; 313 } 314} 315 316static __inline__ void 317TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) 318{ 319 if (inet_rsk(req)->ecn_ok) 320 th->ece = 1; 321} 322 323/* Set up ECN state for a packet on a ESTABLISHED socket that is about to 324 * be sent. 325 */ 326static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, 327 int tcp_header_len) 328{ 329 struct tcp_sock *tp = tcp_sk(sk); 330 331 if (tp->ecn_flags & TCP_ECN_OK) { 332 /* Not-retransmitted data segment: set ECT and inject CWR. */ 333 if (skb->len != tcp_header_len && 334 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { 335 INET_ECN_xmit(sk); 336 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { 337 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 338 tcp_hdr(skb)->cwr = 1; 339 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 340 } 341 } else { 342 /* ACK or retransmitted segment: clear ECT|CE */ 343 INET_ECN_dontxmit(sk); 344 } 345 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) 346 tcp_hdr(skb)->ece = 1; 347 } 348} 349 350/* Constructs common control bits of non-data skb. If SYN/FIN is present, 351 * auto increment end seqno. 352 */ 353static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 354{ 355 skb->ip_summed = CHECKSUM_PARTIAL; 356 skb->csum = 0; 357 358 TCP_SKB_CB(skb)->tcp_flags = flags; 359 TCP_SKB_CB(skb)->sacked = 0; 360 361 skb_shinfo(skb)->gso_segs = 1; 362 skb_shinfo(skb)->gso_size = 0; 363 skb_shinfo(skb)->gso_type = 0; 364 365 TCP_SKB_CB(skb)->seq = seq; 366 if (flags & (TCPHDR_SYN | TCPHDR_FIN)) 367 seq++; 368 TCP_SKB_CB(skb)->end_seq = seq; 369} 370 371static inline int tcp_urg_mode(const struct tcp_sock *tp) 372{ 373 return tp->snd_una != tp->snd_up; 374} 375 376#define OPTION_SACK_ADVERTISE (1 << 0) 377#define OPTION_TS (1 << 1) 378#define OPTION_MD5 (1 << 2) 379#define OPTION_WSCALE (1 << 3) 380#define OPTION_COOKIE_EXTENSION (1 << 4) 381 382struct tcp_out_options { 383 u8 options; /* bit field of OPTION_* */ 384 u8 ws; /* window scale, 0 to disable */ 385 u8 num_sack_blocks; /* number of SACK blocks to include */ 386 u8 hash_size; /* bytes in hash_location */ 387 u16 mss; /* 0 to disable */ 388 __u32 tsval, tsecr; /* need to include OPTION_TS */ 389 __u8 *hash_location; /* temporary pointer, overloaded */ 390}; 391 392/* The sysctl int routines are generic, so check consistency here. 393 */ 394static u8 tcp_cookie_size_check(u8 desired) 395{ 396 int cookie_size; 397 398 if (desired > 0) 399 /* previously specified */ 400 return desired; 401 402 cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); 403 if (cookie_size <= 0) 404 /* no default specified */ 405 return 0; 406 407 if (cookie_size <= TCP_COOKIE_MIN) 408 /* value too small, specify minimum */ 409 return TCP_COOKIE_MIN; 410 411 if (cookie_size >= TCP_COOKIE_MAX) 412 /* value too large, specify maximum */ 413 return TCP_COOKIE_MAX; 414 415 if (cookie_size & 1) 416 /* 8-bit multiple, illegal, fix it */ 417 cookie_size++; 418 419 return (u8)cookie_size; 420} 421 422/* Write previously computed TCP options to the packet. 423 * 424 * Beware: Something in the Internet is very sensitive to the ordering of 425 * TCP options, we learned this through the hard way, so be careful here. 
426 * Luckily we can at least blame others for their non-compliance but from 427 * inter-operatibility perspective it seems that we're somewhat stuck with 428 * the ordering which we have been using if we want to keep working with 429 * those broken things (not that it currently hurts anybody as there isn't 430 * particular reason why the ordering would need to be changed). 431 * 432 * At least SACK_PERM as the first option is known to lead to a disaster 433 * (but it may well be that other scenarios fail similarly). 434 */ 435static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 436 struct tcp_out_options *opts) 437{ 438 u8 options = opts->options; /* mungable copy */ 439 440 /* Having both authentication and cookies for security is redundant, 441 * and there's certainly not enough room. Instead, the cookie-less 442 * extension variant is proposed. 443 * 444 * Consider the pessimal case with authentication. The options 445 * could look like: 446 * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40 447 */ 448 if (unlikely(OPTION_MD5 & options)) { 449 if (unlikely(OPTION_COOKIE_EXTENSION & options)) { 450 *ptr++ = htonl((TCPOPT_COOKIE << 24) | 451 (TCPOLEN_COOKIE_BASE << 16) | 452 (TCPOPT_MD5SIG << 8) | 453 TCPOLEN_MD5SIG); 454 } else { 455 *ptr++ = htonl((TCPOPT_NOP << 24) | 456 (TCPOPT_NOP << 16) | 457 (TCPOPT_MD5SIG << 8) | 458 TCPOLEN_MD5SIG); 459 } 460 options &= ~OPTION_COOKIE_EXTENSION; 461 /* overload cookie hash location */ 462 opts->hash_location = (__u8 *)ptr; 463 ptr += 4; 464 } 465 466 if (unlikely(opts->mss)) { 467 *ptr++ = htonl((TCPOPT_MSS << 24) | 468 (TCPOLEN_MSS << 16) | 469 opts->mss); 470 } 471 472 if (likely(OPTION_TS & options)) { 473 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 474 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 475 (TCPOLEN_SACK_PERM << 16) | 476 (TCPOPT_TIMESTAMP << 8) | 477 TCPOLEN_TIMESTAMP); 478 options &= ~OPTION_SACK_ADVERTISE; 479 } else { 480 *ptr++ = htonl((TCPOPT_NOP << 24) | 481 (TCPOPT_NOP << 16) | 482 (TCPOPT_TIMESTAMP << 8) | 483 TCPOLEN_TIMESTAMP); 484 } 485 *ptr++ = htonl(opts->tsval); 486 *ptr++ = htonl(opts->tsecr); 487 } 488 489 /* Specification requires after timestamp, so do it now. 490 * 491 * Consider the pessimal case without authentication. The options 492 * could look like: 493 * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40 494 */ 495 if (unlikely(OPTION_COOKIE_EXTENSION & options)) { 496 __u8 *cookie_copy = opts->hash_location; 497 u8 cookie_size = opts->hash_size; 498 499 /* 8-bit multiple handled in tcp_cookie_size_check() above, 500 * and elsewhere. 
501 */ 502 if (0x2 & cookie_size) { 503 __u8 *p = (__u8 *)ptr; 504 505 /* 16-bit multiple */ 506 *p++ = TCPOPT_COOKIE; 507 *p++ = TCPOLEN_COOKIE_BASE + cookie_size; 508 *p++ = *cookie_copy++; 509 *p++ = *cookie_copy++; 510 ptr++; 511 cookie_size -= 2; 512 } else { 513 /* 32-bit multiple */ 514 *ptr++ = htonl(((TCPOPT_NOP << 24) | 515 (TCPOPT_NOP << 16) | 516 (TCPOPT_COOKIE << 8) | 517 TCPOLEN_COOKIE_BASE) + 518 cookie_size); 519 } 520 521 if (cookie_size > 0) { 522 memcpy(ptr, cookie_copy, cookie_size); 523 ptr += (cookie_size / 4); 524 } 525 } 526 527 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 528 *ptr++ = htonl((TCPOPT_NOP << 24) | 529 (TCPOPT_NOP << 16) | 530 (TCPOPT_SACK_PERM << 8) | 531 TCPOLEN_SACK_PERM); 532 } 533 534 if (unlikely(OPTION_WSCALE & options)) { 535 *ptr++ = htonl((TCPOPT_NOP << 24) | 536 (TCPOPT_WINDOW << 16) | 537 (TCPOLEN_WINDOW << 8) | 538 opts->ws); 539 } 540 541 if (unlikely(opts->num_sack_blocks)) { 542 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 543 tp->duplicate_sack : tp->selective_acks; 544 int this_sack; 545 546 *ptr++ = htonl((TCPOPT_NOP << 24) | 547 (TCPOPT_NOP << 16) | 548 (TCPOPT_SACK << 8) | 549 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 550 TCPOLEN_SACK_PERBLOCK))); 551 552 for (this_sack = 0; this_sack < opts->num_sack_blocks; 553 ++this_sack) { 554 *ptr++ = htonl(sp[this_sack].start_seq); 555 *ptr++ = htonl(sp[this_sack].end_seq); 556 } 557 558 tp->rx_opt.dsack = 0; 559 } 560} 561 562/* Compute TCP options for SYN packets. This is not the final 563 * network wire format yet. 564 */ 565static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, 566 struct tcp_out_options *opts, 567 struct tcp_md5sig_key **md5) 568{ 569 struct tcp_sock *tp = tcp_sk(sk); 570 struct tcp_cookie_values *cvp = tp->cookie_values; 571 unsigned int remaining = MAX_TCP_OPTION_SPACE; 572 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 573 tcp_cookie_size_check(cvp->cookie_desired) : 574 0; 575 576#ifdef CONFIG_TCP_MD5SIG 577 *md5 = tp->af_specific->md5_lookup(sk, sk); 578 if (*md5) { 579 opts->options |= OPTION_MD5; 580 remaining -= TCPOLEN_MD5SIG_ALIGNED; 581 } 582#else 583 *md5 = NULL; 584#endif 585 586 /* We always get an MSS option. The option bytes which will be seen in 587 * normal data packets should timestamps be used, must be in the MSS 588 * advertised. But we subtract them from tp->mss_cache so that 589 * calculations in tcp_sendmsg are simpler etc. So account for this 590 * fact here if necessary. If we don't do this correctly, as a 591 * receiver we won't recognize data packets as being full sized when we 592 * should, and thus we won't abide by the delayed ACK rules correctly. 593 * SACKs don't matter, we never delay an ACK when we have any of those 594 * going out. */ 595 opts->mss = tcp_advertise_mss(sk); 596 remaining -= TCPOLEN_MSS_ALIGNED; 597 598 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 599 opts->options |= OPTION_TS; 600 opts->tsval = TCP_SKB_CB(skb)->when; 601 opts->tsecr = tp->rx_opt.ts_recent; 602 remaining -= TCPOLEN_TSTAMP_ALIGNED; 603 } 604 if (likely(sysctl_tcp_window_scaling)) { 605 opts->ws = tp->rx_opt.rcv_wscale; 606 opts->options |= OPTION_WSCALE; 607 remaining -= TCPOLEN_WSCALE_ALIGNED; 608 } 609 if (likely(sysctl_tcp_sack)) { 610 opts->options |= OPTION_SACK_ADVERTISE; 611 if (unlikely(!(OPTION_TS & opts->options))) 612 remaining -= TCPOLEN_SACKPERM_ALIGNED; 613 } 614 615 /* Note that timestamps are required by the specification. 
616 * 617 * Odd numbers of bytes are prohibited by the specification, ensuring 618 * that the cookie is 16-bit aligned, and the resulting cookie pair is 619 * 32-bit aligned. 620 */ 621 if (*md5 == NULL && 622 (OPTION_TS & opts->options) && 623 cookie_size > 0) { 624 int need = TCPOLEN_COOKIE_BASE + cookie_size; 625 626 if (0x2 & need) { 627 /* 32-bit multiple */ 628 need += 2; /* NOPs */ 629 630 if (need > remaining) { 631 /* try shrinking cookie to fit */ 632 cookie_size -= 2; 633 need -= 4; 634 } 635 } 636 while (need > remaining && TCP_COOKIE_MIN <= cookie_size) { 637 cookie_size -= 4; 638 need -= 4; 639 } 640 if (TCP_COOKIE_MIN <= cookie_size) { 641 opts->options |= OPTION_COOKIE_EXTENSION; 642 opts->hash_location = (__u8 *)&cvp->cookie_pair[0]; 643 opts->hash_size = cookie_size; 644 645 /* Remember for future incarnations. */ 646 cvp->cookie_desired = cookie_size; 647 648 if (cvp->cookie_desired != cvp->cookie_pair_size) { 649 /* Currently use random bytes as a nonce, 650 * assuming these are completely unpredictable 651 * by hostile users of the same system. 652 */ 653 get_random_bytes(&cvp->cookie_pair[0], 654 cookie_size); 655 cvp->cookie_pair_size = cookie_size; 656 } 657 658 remaining -= need; 659 } 660 } 661 return MAX_TCP_OPTION_SPACE - remaining; 662} 663 664/* Set up TCP options for SYN-ACKs. */ 665static unsigned int tcp_synack_options(struct sock *sk, 666 struct request_sock *req, 667 unsigned int mss, struct sk_buff *skb, 668 struct tcp_out_options *opts, 669 struct tcp_md5sig_key **md5, 670 struct tcp_extend_values *xvp) 671{ 672 struct inet_request_sock *ireq = inet_rsk(req); 673 unsigned int remaining = MAX_TCP_OPTION_SPACE; 674 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? 675 xvp->cookie_plus : 676 0; 677 678#ifdef CONFIG_TCP_MD5SIG 679 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 680 if (*md5) { 681 opts->options |= OPTION_MD5; 682 remaining -= TCPOLEN_MD5SIG_ALIGNED; 683 684 /* We can't fit any SACK blocks in a packet with MD5 + TS 685 * options. There was discussion about disabling SACK 686 * rather than TS in order to fit in better with old, 687 * buggy kernels, but that was deemed to be unnecessary. 688 */ 689 ireq->tstamp_ok &= !ireq->sack_ok; 690 } 691#else 692 *md5 = NULL; 693#endif 694 695 /* We always send an MSS option. */ 696 opts->mss = mss; 697 remaining -= TCPOLEN_MSS_ALIGNED; 698 699 if (likely(ireq->wscale_ok)) { 700 opts->ws = ireq->rcv_wscale; 701 opts->options |= OPTION_WSCALE; 702 remaining -= TCPOLEN_WSCALE_ALIGNED; 703 } 704 if (likely(ireq->tstamp_ok)) { 705 opts->options |= OPTION_TS; 706 opts->tsval = TCP_SKB_CB(skb)->when; 707 opts->tsecr = req->ts_recent; 708 remaining -= TCPOLEN_TSTAMP_ALIGNED; 709 } 710 if (likely(ireq->sack_ok)) { 711 opts->options |= OPTION_SACK_ADVERTISE; 712 if (unlikely(!ireq->tstamp_ok)) 713 remaining -= TCPOLEN_SACKPERM_ALIGNED; 714 } 715 716 /* Similar rationale to tcp_syn_options() applies here, too. 717 * If the <SYN> options fit, the same options should fit now! 718 */ 719 if (*md5 == NULL && 720 ireq->tstamp_ok && 721 cookie_plus > TCPOLEN_COOKIE_BASE) { 722 int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ 723 724 if (0x2 & need) { 725 /* 32-bit multiple */ 726 need += 2; /* NOPs */ 727 } 728 if (need <= remaining) { 729 opts->options |= OPTION_COOKIE_EXTENSION; 730 opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE; 731 remaining -= need; 732 } else { 733 /* There's no error return, so flag it. 
*/ 734 xvp->cookie_out_never = 1; /* true */ 735 opts->hash_size = 0; 736 } 737 } 738 return MAX_TCP_OPTION_SPACE - remaining; 739} 740 741/* Compute TCP options for ESTABLISHED sockets. This is not the 742 * final wire format yet. 743 */ 744static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, 745 struct tcp_out_options *opts, 746 struct tcp_md5sig_key **md5) 747{ 748 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 749 struct tcp_sock *tp = tcp_sk(sk); 750 unsigned int size = 0; 751 unsigned int eff_sacks; 752 753#ifdef CONFIG_TCP_MD5SIG 754 *md5 = tp->af_specific->md5_lookup(sk, sk); 755 if (unlikely(*md5)) { 756 opts->options |= OPTION_MD5; 757 size += TCPOLEN_MD5SIG_ALIGNED; 758 } 759#else 760 *md5 = NULL; 761#endif 762 763 if (likely(tp->rx_opt.tstamp_ok)) { 764 opts->options |= OPTION_TS; 765 opts->tsval = tcb ? tcb->when : 0; 766 opts->tsecr = tp->rx_opt.ts_recent; 767 size += TCPOLEN_TSTAMP_ALIGNED; 768 } 769 770 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 771 if (unlikely(eff_sacks)) { 772 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 773 opts->num_sack_blocks = 774 min_t(unsigned int, eff_sacks, 775 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 776 TCPOLEN_SACK_PERBLOCK); 777 size += TCPOLEN_SACK_BASE_ALIGNED + 778 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 779 } 780 781 return size; 782} 783 784/* This routine actually transmits TCP packets queued in by 785 * tcp_do_sendmsg(). This is used by both the initial 786 * transmission and possible later retransmissions. 787 * All SKB's seen here are completely headerless. It is our 788 * job to build the TCP header, and pass the packet down to 789 * IP so it can do the same plus pass the packet off to the 790 * device. 791 * 792 * We are working here with either a clone of the original 793 * SKB, or a fresh unique copy made by the retransmit engine. 794 */ 795static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 796 gfp_t gfp_mask) 797{ 798 const struct inet_connection_sock *icsk = inet_csk(sk); 799 struct inet_sock *inet; 800 struct tcp_sock *tp; 801 struct tcp_skb_cb *tcb; 802 struct tcp_out_options opts; 803 unsigned int tcp_options_size, tcp_header_size; 804 struct tcp_md5sig_key *md5; 805 struct tcphdr *th; 806 int err; 807 808 BUG_ON(!skb || !tcp_skb_pcount(skb)); 809 810 /* If congestion control is doing timestamping, we must 811 * take such a timestamp before we potentially clone/copy. 812 */ 813 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) 814 __net_timestamp(skb); 815 816 if (likely(clone_it)) { 817 if (unlikely(skb_cloned(skb))) 818 skb = pskb_copy(skb, gfp_mask); 819 else 820 skb = skb_clone(skb, gfp_mask); 821 if (unlikely(!skb)) 822 return -ENOBUFS; 823 } 824 825 inet = inet_sk(sk); 826 tp = tcp_sk(sk); 827 tcb = TCP_SKB_CB(skb); 828 memset(&opts, 0, sizeof(opts)); 829 830 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) 831 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 832 else 833 tcp_options_size = tcp_established_options(sk, skb, &opts, 834 &md5); 835 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 836 837 if (tcp_packets_in_flight(tp) == 0) { 838 tcp_ca_event(sk, CA_EVENT_TX_START); 839 skb->ooo_okay = 1; 840 } else 841 skb->ooo_okay = 0; 842 843 skb_push(skb, tcp_header_size); 844 skb_reset_transport_header(skb); 845 skb_set_owner_w(skb, sk); 846 847 /* Build TCP header and checksum it. 
*/ 848 th = tcp_hdr(skb); 849 th->source = inet->inet_sport; 850 th->dest = inet->inet_dport; 851 th->seq = htonl(tcb->seq); 852 th->ack_seq = htonl(tp->rcv_nxt); 853 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 854 tcb->tcp_flags); 855 856 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { 857 /* RFC1323: The window in SYN & SYN/ACK segments 858 * is never scaled. 859 */ 860 th->window = htons(min(tp->rcv_wnd, 65535U)); 861 } else { 862 th->window = htons(tcp_select_window(sk)); 863 } 864 th->check = 0; 865 th->urg_ptr = 0; 866 867 /* The urg_mode check is necessary during a below snd_una win probe */ 868 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 869 if (before(tp->snd_up, tcb->seq + 0x10000)) { 870 th->urg_ptr = htons(tp->snd_up - tcb->seq); 871 th->urg = 1; 872 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 873 th->urg_ptr = htons(0xFFFF); 874 th->urg = 1; 875 } 876 } 877 878 tcp_options_write((__be32 *)(th + 1), tp, &opts); 879 if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) 880 TCP_ECN_send(sk, skb, tcp_header_size); 881 882#ifdef CONFIG_TCP_MD5SIG 883 /* Calculate the MD5 hash, as we have all we need now */ 884 if (md5) { 885 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 886 tp->af_specific->calc_md5_hash(opts.hash_location, 887 md5, sk, NULL, skb); 888 } 889#endif 890 891 icsk->icsk_af_ops->send_check(sk, skb); 892 893 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 894 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 895 896 if (skb->len != tcp_header_size) 897 tcp_event_data_sent(tp, sk); 898 899 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 900 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 901 tcp_skb_pcount(skb)); 902 903 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); 904 if (likely(err <= 0)) 905 return err; 906 907 tcp_enter_cwr(sk, 1); 908 909 return net_xmit_eval(err); 910} 911 912/* This routine just queues the buffer for sending. 913 * 914 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 915 * otherwise socket can stall. 916 */ 917static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 918{ 919 struct tcp_sock *tp = tcp_sk(sk); 920 921 /* Advance write_seq and place onto the write_queue. */ 922 tp->write_seq = TCP_SKB_CB(skb)->end_seq; 923 skb_header_release(skb); 924 tcp_add_write_queue_tail(sk, skb); 925 sk->sk_wmem_queued += skb->truesize; 926 sk_mem_charge(sk, skb->truesize); 927} 928 929/* Initialize TSO segments for a packet. */ 930static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 931 unsigned int mss_now) 932{ 933 if (skb->len <= mss_now || !sk_can_gso(sk) || 934 skb->ip_summed == CHECKSUM_NONE) { 935 /* Avoid the costly divide in the normal 936 * non-TSO case. 937 */ 938 skb_shinfo(skb)->gso_segs = 1; 939 skb_shinfo(skb)->gso_size = 0; 940 skb_shinfo(skb)->gso_type = 0; 941 } else { 942 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); 943 skb_shinfo(skb)->gso_size = mss_now; 944 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 945 } 946} 947 948/* When a modification to fackets out becomes necessary, we need to check 949 * skb is counted to fackets_out or not. 
950 */ 951static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, 952 int decr) 953{ 954 struct tcp_sock *tp = tcp_sk(sk); 955 956 if (!tp->sacked_out || tcp_is_reno(tp)) 957 return; 958 959 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 960 tp->fackets_out -= decr; 961} 962 963/* Pcount in the middle of the write queue got changed, we need to do various 964 * tweaks to fix counters 965 */ 966static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 967{ 968 struct tcp_sock *tp = tcp_sk(sk); 969 970 tp->packets_out -= decr; 971 972 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 973 tp->sacked_out -= decr; 974 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 975 tp->retrans_out -= decr; 976 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 977 tp->lost_out -= decr; 978 979 /* Reno case is special. Sigh... */ 980 if (tcp_is_reno(tp) && decr > 0) 981 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 982 983 tcp_adjust_fackets_out(sk, skb, decr); 984 985 if (tp->lost_skb_hint && 986 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 987 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) 988 tp->lost_cnt_hint -= decr; 989 990 tcp_verify_left_out(tp); 991} 992 993/* Function to create two new TCP segments. Shrinks the given segment 994 * to the specified size and appends a new segment with the rest of the 995 * packet to the list. This won't be called frequently, I hope. 996 * Remember, these are still headerless SKBs at this point. 997 */ 998int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 999 unsigned int mss_now) 1000{ 1001 struct tcp_sock *tp = tcp_sk(sk); 1002 struct sk_buff *buff; 1003 int nsize, old_factor; 1004 int nlen; 1005 u8 flags; 1006 1007 if (WARN_ON(len > skb->len)) 1008 return -EINVAL; 1009 1010 nsize = skb_headlen(skb) - len; 1011 if (nsize < 0) 1012 nsize = 0; 1013 1014 if (skb_cloned(skb) && 1015 skb_is_nonlinear(skb) && 1016 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1017 return -ENOMEM; 1018 1019 /* Get a new skb... force flag on. */ 1020 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 1021 if (buff == NULL) 1022 return -ENOMEM; /* We'll just try again later. */ 1023 1024 sk->sk_wmem_queued += buff->truesize; 1025 sk_mem_charge(sk, buff->truesize); 1026 nlen = skb->len - len - nsize; 1027 buff->truesize += nlen; 1028 skb->truesize -= nlen; 1029 1030 /* Correct the sequence numbers. */ 1031 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1032 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1033 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1034 1035 /* PSH and FIN should only be set in the second packet. */ 1036 flags = TCP_SKB_CB(skb)->tcp_flags; 1037 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1038 TCP_SKB_CB(buff)->tcp_flags = flags; 1039 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1040 1041 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 1042 /* Copy and checksum data tail into the new buffer. */ 1043 buff->csum = csum_partial_copy_nocheck(skb->data + len, 1044 skb_put(buff, nsize), 1045 nsize, 0); 1046 1047 skb_trim(skb, len); 1048 1049 skb->csum = csum_block_sub(skb->csum, buff->csum, len); 1050 } else { 1051 skb->ip_summed = CHECKSUM_PARTIAL; 1052 skb_split(skb, buff, len); 1053 } 1054 1055 buff->ip_summed = skb->ip_summed; 1056 1057 /* Looks stupid, but our code really uses when of 1058 * skbs, which it never sent before. 
--ANK 1059 */ 1060 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 1061 buff->tstamp = skb->tstamp; 1062 1063 old_factor = tcp_skb_pcount(skb); 1064 1065 /* Fix up tso_factor for both original and new SKB. */ 1066 tcp_set_skb_tso_segs(sk, skb, mss_now); 1067 tcp_set_skb_tso_segs(sk, buff, mss_now); 1068 1069 /* If this packet has been sent out already, we must 1070 * adjust the various packet counters. 1071 */ 1072 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 1073 int diff = old_factor - tcp_skb_pcount(skb) - 1074 tcp_skb_pcount(buff); 1075 1076 if (diff) 1077 tcp_adjust_pcount(sk, skb, diff); 1078 } 1079 1080 /* Link BUFF into the send queue. */ 1081 skb_header_release(buff); 1082 tcp_insert_write_queue_after(skb, buff, sk); 1083 1084 return 0; 1085} 1086 1087/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c 1088 * eventually). The difference is that pulled data not copied, but 1089 * immediately discarded. 1090 */ 1091static void __pskb_trim_head(struct sk_buff *skb, int len) 1092{ 1093 int i, k, eat; 1094 1095 eat = min_t(int, len, skb_headlen(skb)); 1096 if (eat) { 1097 __skb_pull(skb, eat); 1098 skb->avail_size -= eat; 1099 len -= eat; 1100 if (!len) 1101 return; 1102 } 1103 eat = len; 1104 k = 0; 1105 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1106 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1107 1108 if (size <= eat) { 1109 skb_frag_unref(skb, i); 1110 eat -= size; 1111 } else { 1112 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1113 if (eat) { 1114 skb_shinfo(skb)->frags[k].page_offset += eat; 1115 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 1116 eat = 0; 1117 } 1118 k++; 1119 } 1120 } 1121 skb_shinfo(skb)->nr_frags = k; 1122 1123 skb_reset_tail_pointer(skb); 1124 skb->data_len -= len; 1125 skb->len = skb->data_len; 1126} 1127 1128/* Remove acked data from a packet in the transmit queue. */ 1129int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 1130{ 1131 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1132 return -ENOMEM; 1133 1134 __pskb_trim_head(skb, len); 1135 1136 TCP_SKB_CB(skb)->seq += len; 1137 skb->ip_summed = CHECKSUM_PARTIAL; 1138 1139 skb->truesize -= len; 1140 sk->sk_wmem_queued -= len; 1141 sk_mem_uncharge(sk, len); 1142 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1143 1144 /* Any change of skb->len requires recalculation of tso factor. */ 1145 if (tcp_skb_pcount(skb) > 1) 1146 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); 1147 1148 return 0; 1149} 1150 1151/* Calculate MSS. Not accounting for SACKs here. 
*/ 1152int tcp_mtu_to_mss(struct sock *sk, int pmtu) 1153{ 1154 const struct tcp_sock *tp = tcp_sk(sk); 1155 const struct inet_connection_sock *icsk = inet_csk(sk); 1156 int mss_now; 1157 1158 /* Calculate base mss without TCP options: 1159 It is MMS_S - sizeof(tcphdr) of rfc1122 1160 */ 1161 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 1162 1163 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 1164 if (icsk->icsk_af_ops->net_frag_header_len) { 1165 const struct dst_entry *dst = __sk_dst_get(sk); 1166 1167 if (dst && dst_allfrag(dst)) 1168 mss_now -= icsk->icsk_af_ops->net_frag_header_len; 1169 } 1170 1171 /* Clamp it (mss_clamp does not include tcp options) */ 1172 if (mss_now > tp->rx_opt.mss_clamp) 1173 mss_now = tp->rx_opt.mss_clamp; 1174 1175 /* Now subtract optional transport overhead */ 1176 mss_now -= icsk->icsk_ext_hdr_len; 1177 1178 /* Then reserve room for full set of TCP options and 8 bytes of data */ 1179 if (mss_now < 48) 1180 mss_now = 48; 1181 1182 /* Now subtract TCP options size, not including SACKs */ 1183 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); 1184 1185 return mss_now; 1186} 1187 1188/* Inverse of above */ 1189int tcp_mss_to_mtu(struct sock *sk, int mss) 1190{ 1191 const struct tcp_sock *tp = tcp_sk(sk); 1192 const struct inet_connection_sock *icsk = inet_csk(sk); 1193 int mtu; 1194 1195 mtu = mss + 1196 tp->tcp_header_len + 1197 icsk->icsk_ext_hdr_len + 1198 icsk->icsk_af_ops->net_header_len; 1199 1200 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 1201 if (icsk->icsk_af_ops->net_frag_header_len) { 1202 const struct dst_entry *dst = __sk_dst_get(sk); 1203 1204 if (dst && dst_allfrag(dst)) 1205 mtu += icsk->icsk_af_ops->net_frag_header_len; 1206 } 1207 return mtu; 1208} 1209 1210/* MTU probing init per socket */ 1211void tcp_mtup_init(struct sock *sk) 1212{ 1213 struct tcp_sock *tp = tcp_sk(sk); 1214 struct inet_connection_sock *icsk = inet_csk(sk); 1215 1216 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 1217 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 1218 icsk->icsk_af_ops->net_header_len; 1219 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 1220 icsk->icsk_mtup.probe_size = 0; 1221} 1222EXPORT_SYMBOL(tcp_mtup_init); 1223 1224/* This function synchronize snd mss to current pmtu/exthdr set. 1225 1226 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts 1227 for TCP options, but includes only bare TCP header. 1228 1229 tp->rx_opt.mss_clamp is mss negotiated at connection setup. 1230 It is minimum of user_mss and mss received with SYN. 1231 It also does not include TCP options. 1232 1233 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 1234 1235 tp->mss_cache is current effective sending mss, including 1236 all tcp options except for SACKs. It is evaluated, 1237 taking into account current pmtu, but never exceeds 1238 tp->rx_opt.mss_clamp. 1239 1240 NOTE1. rfc1122 clearly states that advertised MSS 1241 DOES NOT include either tcp or ip options. 1242 1243 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1244 are READ ONLY outside this function. 
--ANK (980731) 1245 */ 1246unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 1247{ 1248 struct tcp_sock *tp = tcp_sk(sk); 1249 struct inet_connection_sock *icsk = inet_csk(sk); 1250 int mss_now; 1251 1252 if (icsk->icsk_mtup.search_high > pmtu) 1253 icsk->icsk_mtup.search_high = pmtu; 1254 1255 mss_now = tcp_mtu_to_mss(sk, pmtu); 1256 mss_now = tcp_bound_to_half_wnd(tp, mss_now); 1257 1258 /* And store cached results */ 1259 icsk->icsk_pmtu_cookie = pmtu; 1260 if (icsk->icsk_mtup.enabled) 1261 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1262 tp->mss_cache = mss_now; 1263 1264 return mss_now; 1265} 1266EXPORT_SYMBOL(tcp_sync_mss); 1267 1268/* Compute the current effective MSS, taking SACKs and IP options, 1269 * and even PMTU discovery events into account. 1270 */ 1271unsigned int tcp_current_mss(struct sock *sk) 1272{ 1273 const struct tcp_sock *tp = tcp_sk(sk); 1274 const struct dst_entry *dst = __sk_dst_get(sk); 1275 u32 mss_now; 1276 unsigned int header_len; 1277 struct tcp_out_options opts; 1278 struct tcp_md5sig_key *md5; 1279 1280 mss_now = tp->mss_cache; 1281 1282 if (dst) { 1283 u32 mtu = dst_mtu(dst); 1284 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 1285 mss_now = tcp_sync_mss(sk, mtu); 1286 } 1287 1288 header_len = tcp_established_options(sk, NULL, &opts, &md5) + 1289 sizeof(struct tcphdr); 1290 /* The mss_cache is sized based on tp->tcp_header_len, which assumes 1291 * some common options. If this is an odd packet (because we have SACK 1292 * blocks etc) then our calculated header_len will be different, and 1293 * we have to adjust mss_now correspondingly */ 1294 if (header_len != tp->tcp_header_len) { 1295 int delta = (int) header_len - tp->tcp_header_len; 1296 mss_now -= delta; 1297 } 1298 1299 return mss_now; 1300} 1301 1302/* Congestion window validation. (RFC2861) */ 1303static void tcp_cwnd_validate(struct sock *sk) 1304{ 1305 struct tcp_sock *tp = tcp_sk(sk); 1306 1307 if (tp->packets_out >= tp->snd_cwnd) { 1308 /* Network is feed fully. */ 1309 tp->snd_cwnd_used = 0; 1310 tp->snd_cwnd_stamp = tcp_time_stamp; 1311 } else { 1312 /* Network starves. */ 1313 if (tp->packets_out > tp->snd_cwnd_used) 1314 tp->snd_cwnd_used = tp->packets_out; 1315 1316 if (sysctl_tcp_slow_start_after_idle && 1317 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1318 tcp_cwnd_application_limited(sk); 1319 } 1320} 1321 1322/* Returns the portion of skb which can be sent right away without 1323 * introducing MSS oddities to segment boundaries. In rare cases where 1324 * mss_now != mss_cache, we will request caller to create a small skb 1325 * per input skb which could be mostly avoided here (if desired). 1326 * 1327 * We explicitly want to create a request for splitting write queue tail 1328 * to a small skb for Nagle purposes while avoiding unnecessary modulos, 1329 * thus all the complexity (cwnd_len is always MSS multiple which we 1330 * return whenever allowed by the other factors). Basically we need the 1331 * modulo only when the receiver window alone is the limiting factor or 1332 * when we would be allowed to send the split-due-to-Nagle skb fully. 
1333 */ 1334static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, 1335 unsigned int mss_now, unsigned int cwnd) 1336{ 1337 const struct tcp_sock *tp = tcp_sk(sk); 1338 u32 needed, window, cwnd_len; 1339 1340 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1341 cwnd_len = mss_now * cwnd; 1342 1343 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1344 return cwnd_len; 1345 1346 needed = min(skb->len, window); 1347 1348 if (cwnd_len <= needed) 1349 return cwnd_len; 1350 1351 return needed - needed % mss_now; 1352} 1353 1354/* Can at least one segment of SKB be sent right now, according to the 1355 * congestion window rules? If so, return how many segments are allowed. 1356 */ 1357static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1358 const struct sk_buff *skb) 1359{ 1360 u32 in_flight, cwnd; 1361 1362 /* Don't be strict about the congestion window for the final FIN. */ 1363 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 1364 tcp_skb_pcount(skb) == 1) 1365 return 1; 1366 1367 in_flight = tcp_packets_in_flight(tp); 1368 cwnd = tp->snd_cwnd; 1369 if (in_flight < cwnd) 1370 return (cwnd - in_flight); 1371 1372 return 0; 1373} 1374 1375/* Initialize TSO state of a skb. 1376 * This must be invoked the first time we consider transmitting 1377 * SKB onto the wire. 1378 */ 1379static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, 1380 unsigned int mss_now) 1381{ 1382 int tso_segs = tcp_skb_pcount(skb); 1383 1384 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1385 tcp_set_skb_tso_segs(sk, skb, mss_now); 1386 tso_segs = tcp_skb_pcount(skb); 1387 } 1388 return tso_segs; 1389} 1390 1391/* Minshall's variant of the Nagle send check. */ 1392static inline int tcp_minshall_check(const struct tcp_sock *tp) 1393{ 1394 return after(tp->snd_sml, tp->snd_una) && 1395 !after(tp->snd_sml, tp->snd_nxt); 1396} 1397 1398/* Return 0, if packet can be sent now without violation Nagle's rules: 1399 * 1. It is full sized. 1400 * 2. Or it contains FIN. (already checked by caller) 1401 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1402 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1403 * With Minshall's modification: all sent small packets are ACKed. 1404 */ 1405static inline int tcp_nagle_check(const struct tcp_sock *tp, 1406 const struct sk_buff *skb, 1407 unsigned int mss_now, int nonagle) 1408{ 1409 return skb->len < mss_now && 1410 ((nonagle & TCP_NAGLE_CORK) || 1411 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1412} 1413 1414/* Return non-zero if the Nagle test allows this packet to be 1415 * sent now. 1416 */ 1417static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1418 unsigned int cur_mss, int nonagle) 1419{ 1420 /* Nagle rule does not apply to frames, which sit in the middle of the 1421 * write_queue (they have no chances to get new data). 1422 * 1423 * This is implemented in the callers, where they modify the 'nonagle' 1424 * argument based upon the location of SKB in the send queue. 1425 */ 1426 if (nonagle & TCP_NAGLE_PUSH) 1427 return 1; 1428 1429 /* Don't use the nagle rule for urgent data (or for the final FIN). 1430 * Nagle can be ignored during F-RTO too (see RFC4138). 
1431 */ 1432 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1433 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1434 return 1; 1435 1436 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1437 return 1; 1438 1439 return 0; 1440} 1441 1442/* Does at least the first segment of SKB fit into the send window? */ 1443static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1444 unsigned int cur_mss) 1445{ 1446 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1447 1448 if (skb->len > cur_mss) 1449 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1450 1451 return !after(end_seq, tcp_wnd_end(tp)); 1452} 1453 1454/* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1455 * should be put on the wire right now. If so, it returns the number of 1456 * packets allowed by the congestion window. 1457 */ 1458static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1459 unsigned int cur_mss, int nonagle) 1460{ 1461 const struct tcp_sock *tp = tcp_sk(sk); 1462 unsigned int cwnd_quota; 1463 1464 tcp_init_tso_segs(sk, skb, cur_mss); 1465 1466 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1467 return 0; 1468 1469 cwnd_quota = tcp_cwnd_test(tp, skb); 1470 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1471 cwnd_quota = 0; 1472 1473 return cwnd_quota; 1474} 1475 1476/* Test if sending is allowed right now. */ 1477int tcp_may_send_now(struct sock *sk) 1478{ 1479 const struct tcp_sock *tp = tcp_sk(sk); 1480 struct sk_buff *skb = tcp_send_head(sk); 1481 1482 return skb && 1483 tcp_snd_test(sk, skb, tcp_current_mss(sk), 1484 (tcp_skb_is_last(sk, skb) ? 1485 tp->nonagle : TCP_NAGLE_PUSH)); 1486} 1487 1488/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1489 * which is put after SKB on the list. It is very much like 1490 * tcp_fragment() except that it may make several kinds of assumptions 1491 * in order to speed up the splitting operation. In particular, we 1492 * know that all the data is in scatter-gather pages, and that the 1493 * packet has never been sent out before (and thus is not cloned). 1494 */ 1495static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1496 unsigned int mss_now, gfp_t gfp) 1497{ 1498 struct sk_buff *buff; 1499 int nlen = skb->len - len; 1500 u8 flags; 1501 1502 /* All of a TSO frame must be composed of paged data. */ 1503 if (skb->len != skb->data_len) 1504 return tcp_fragment(sk, skb, len, mss_now); 1505 1506 buff = sk_stream_alloc_skb(sk, 0, gfp); 1507 if (unlikely(buff == NULL)) 1508 return -ENOMEM; 1509 1510 sk->sk_wmem_queued += buff->truesize; 1511 sk_mem_charge(sk, buff->truesize); 1512 buff->truesize += nlen; 1513 skb->truesize -= nlen; 1514 1515 /* Correct the sequence numbers. */ 1516 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1517 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1518 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1519 1520 /* PSH and FIN should only be set in the second packet. */ 1521 flags = TCP_SKB_CB(skb)->tcp_flags; 1522 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1523 TCP_SKB_CB(buff)->tcp_flags = flags; 1524 1525 /* This packet was never sent out yet, so no SACK bits. */ 1526 TCP_SKB_CB(buff)->sacked = 0; 1527 1528 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1529 skb_split(skb, buff, len); 1530 1531 /* Fix up tso_factor for both original and new SKB. */ 1532 tcp_set_skb_tso_segs(sk, skb, mss_now); 1533 tcp_set_skb_tso_segs(sk, buff, mss_now); 1534 1535 /* Link BUFF into the send queue. 
*/ 1536 skb_header_release(buff); 1537 tcp_insert_write_queue_after(skb, buff, sk); 1538 1539 return 0; 1540} 1541 1542/* Try to defer sending, if possible, in order to minimize the amount 1543 * of TSO splitting we do. View it as a kind of TSO Nagle test. 1544 * 1545 * This algorithm is from John Heffner. 1546 */ 1547static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1548{ 1549 struct tcp_sock *tp = tcp_sk(sk); 1550 const struct inet_connection_sock *icsk = inet_csk(sk); 1551 u32 send_win, cong_win, limit, in_flight; 1552 int win_divisor; 1553 1554 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1555 goto send_now; 1556 1557 if (icsk->icsk_ca_state != TCP_CA_Open) 1558 goto send_now; 1559 1560 /* Defer for less than two clock ticks. */ 1561 if (tp->tso_deferred && 1562 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1563 goto send_now; 1564 1565 in_flight = tcp_packets_in_flight(tp); 1566 1567 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1568 1569 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1570 1571 /* From in_flight test above, we know that cwnd > in_flight. */ 1572 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1573 1574 limit = min(send_win, cong_win); 1575 1576 /* If a full-sized TSO skb can be sent, do it. */ 1577 if (limit >= sk->sk_gso_max_size) 1578 goto send_now; 1579 1580 /* Middle in queue won't get any more data, full sendable already? */ 1581 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1582 goto send_now; 1583 1584 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1585 if (win_divisor) { 1586 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1587 1588 /* If at least some fraction of a window is available, 1589 * just use it. 1590 */ 1591 chunk /= win_divisor; 1592 if (limit >= chunk) 1593 goto send_now; 1594 } else { 1595 /* Different approach, try not to defer past a single 1596 * ACK. Receiver should ACK every other full sized 1597 * frame, so if we have space for more than 3 frames 1598 * then send now. 1599 */ 1600 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1601 goto send_now; 1602 } 1603 1604 /* Ok, it looks like it is advisable to defer. */ 1605 tp->tso_deferred = 1 | (jiffies << 1); 1606 1607 return 1; 1608 1609send_now: 1610 tp->tso_deferred = 0; 1611 return 0; 1612} 1613 1614/* Create a new MTU probe if we are ready. 1615 * MTU probe is regularly attempting to increase the path MTU by 1616 * deliberately sending larger packets. This discovers routing 1617 * changes resulting in larger path MTUs. 1618 * 1619 * Returns 0 if we should wait to probe (no cwnd available), 1620 * 1 if a probe was sent, 1621 * -1 otherwise 1622 */ 1623static int tcp_mtu_probe(struct sock *sk) 1624{ 1625 struct tcp_sock *tp = tcp_sk(sk); 1626 struct inet_connection_sock *icsk = inet_csk(sk); 1627 struct sk_buff *skb, *nskb, *next; 1628 int len; 1629 int probe_size; 1630 int size_needed; 1631 int copy; 1632 int mss_now; 1633 1634 /* Not currently probing/verifying, 1635 * not in recovery, 1636 * have enough cwnd, and 1637 * not SACKing (the variable headers throw things off) */ 1638 if (!icsk->icsk_mtup.enabled || 1639 icsk->icsk_mtup.probe_size || 1640 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 1641 tp->snd_cwnd < 11 || 1642 tp->rx_opt.num_sacks || tp->rx_opt.dsack) 1643 return -1; 1644 1645 /* Very simple search strategy: just double the MSS. 
*/ 1646 mss_now = tcp_current_mss(sk); 1647 probe_size = 2 * tp->mss_cache; 1648 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 1649 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 1650 /* TODO: set timer for probe_converge_event */ 1651 return -1; 1652 } 1653 1654 /* Have enough data in the send queue to probe? */ 1655 if (tp->write_seq - tp->snd_nxt < size_needed) 1656 return -1; 1657 1658 if (tp->snd_wnd < size_needed) 1659 return -1; 1660 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 1661 return 0; 1662 1663 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1664 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1665 if (!tcp_packets_in_flight(tp)) 1666 return -1; 1667 else 1668 return 0; 1669 } 1670 1671 /* We're allowed to probe. Build it now. */ 1672 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 1673 return -1; 1674 sk->sk_wmem_queued += nskb->truesize; 1675 sk_mem_charge(sk, nskb->truesize); 1676 1677 skb = tcp_send_head(sk); 1678 1679 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1680 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1681 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 1682 TCP_SKB_CB(nskb)->sacked = 0; 1683 nskb->csum = 0; 1684 nskb->ip_summed = skb->ip_summed; 1685 1686 tcp_insert_write_queue_before(nskb, skb, sk); 1687 1688 len = 0; 1689 tcp_for_write_queue_from_safe(skb, next, sk) { 1690 copy = min_t(int, skb->len, probe_size - len); 1691 if (nskb->ip_summed) 1692 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1693 else 1694 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1695 skb_put(nskb, copy), 1696 copy, nskb->csum); 1697 1698 if (skb->len <= copy) { 1699 /* We've eaten all the data from this skb. 1700 * Throw it away. */ 1701 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1702 tcp_unlink_write_queue(skb, sk); 1703 sk_wmem_free_skb(sk, skb); 1704 } else { 1705 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 1706 ~(TCPHDR_FIN|TCPHDR_PSH); 1707 if (!skb_shinfo(skb)->nr_frags) { 1708 skb_pull(skb, copy); 1709 if (skb->ip_summed != CHECKSUM_PARTIAL) 1710 skb->csum = csum_partial(skb->data, 1711 skb->len, 0); 1712 } else { 1713 __pskb_trim_head(skb, copy); 1714 tcp_set_skb_tso_segs(sk, skb, mss_now); 1715 } 1716 TCP_SKB_CB(skb)->seq += copy; 1717 } 1718 1719 len += copy; 1720 1721 if (len >= probe_size) 1722 break; 1723 } 1724 tcp_init_tso_segs(sk, nskb, nskb->len); 1725 1726 /* We're ready to send. If this fails, the probe will 1727 * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 1728 TCP_SKB_CB(nskb)->when = tcp_time_stamp; 1729 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1730 /* Decrement cwnd here because we are sending 1731 * effectively two packets. */ 1732 tp->snd_cwnd--; 1733 tcp_event_new_data_sent(sk, nskb); 1734 1735 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1736 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 1737 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 1738 1739 return 1; 1740 } 1741 1742 return -1; 1743} 1744 1745/* This routine writes packets to the network. It advances the 1746 * send_head. This happens as incoming acks open up the remote 1747 * window for us. 1748 * 1749 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1750 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1751 * account rare use of URG, this is not a big flaw. 
1752 * 1753 * Returns 1, if no segments are in flight and we have queued segments, but 1754 * cannot send anything now because of SWS or another problem. 1755 */ 1756static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1757 int push_one, gfp_t gfp) 1758{ 1759 struct tcp_sock *tp = tcp_sk(sk); 1760 struct sk_buff *skb; 1761 unsigned int tso_segs, sent_pkts; 1762 int cwnd_quota; 1763 int result; 1764 1765 sent_pkts = 0; 1766 1767 if (!push_one) { 1768 /* Do MTU probing. */ 1769 result = tcp_mtu_probe(sk); 1770 if (!result) { 1771 return 0; 1772 } else if (result > 0) { 1773 sent_pkts = 1; 1774 } 1775 } 1776 1777 while ((skb = tcp_send_head(sk))) { 1778 unsigned int limit; 1779 1780 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1781 BUG_ON(!tso_segs); 1782 1783 cwnd_quota = tcp_cwnd_test(tp, skb); 1784 if (!cwnd_quota) 1785 break; 1786 1787 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1788 break; 1789 1790 if (tso_segs == 1) { 1791 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1792 (tcp_skb_is_last(sk, skb) ? 1793 nonagle : TCP_NAGLE_PUSH)))) 1794 break; 1795 } else { 1796 if (!push_one && tcp_tso_should_defer(sk, skb)) 1797 break; 1798 } 1799 1800 limit = mss_now; 1801 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1802 limit = tcp_mss_split_point(sk, skb, mss_now, 1803 cwnd_quota); 1804 1805 if (skb->len > limit && 1806 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 1807 break; 1808 1809 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1810 1811 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 1812 break; 1813 1814 /* Advance the send_head. This one is sent out. 1815 * This call will increment packets_out. 1816 */ 1817 tcp_event_new_data_sent(sk, skb); 1818 1819 tcp_minshall_update(tp, mss_now, skb); 1820 sent_pkts += tcp_skb_pcount(skb); 1821 1822 if (push_one) 1823 break; 1824 } 1825 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 1826 tp->prr_out += sent_pkts; 1827 1828 if (likely(sent_pkts)) { 1829 tcp_cwnd_validate(sk); 1830 return 0; 1831 } 1832 return !tp->packets_out && tcp_send_head(sk); 1833} 1834 1835/* Push out any pending frames which were held back due to 1836 * TCP_CORK or attempt at coalescing tiny packets. 1837 * The socket must be locked by the caller. 1838 */ 1839void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1840 int nonagle) 1841{ 1842 /* If we are closed, the bytes will have to remain here. 1843 * In time closedown will finish, we empty the write queue and 1844 * all will be happy. 1845 */ 1846 if (unlikely(sk->sk_state == TCP_CLOSE)) 1847 return; 1848 1849 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC)) 1850 tcp_check_probe_timer(sk); 1851} 1852 1853/* Send _single_ skb sitting at the send head. This function requires 1854 * true push pending frames to setup probe timer etc. 1855 */ 1856void tcp_push_one(struct sock *sk, unsigned int mss_now) 1857{ 1858 struct sk_buff *skb = tcp_send_head(sk); 1859 1860 BUG_ON(!skb || skb->len < mss_now); 1861 1862 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 1863} 1864 1865/* This function returns the amount that we can raise the 1866 * usable window based on the following constraints 1867 * 1868 * 1. The window can never be shrunk once it is offered (RFC 793) 1869 * 2. We limit memory per socket 1870 * 1871 * RFC 1122: 1872 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 1873 * RECV.NEXT + RCV.WIN fixed until: 1874 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 1875 * 1876 * i.e. 
don't raise the right edge of the window until you can raise 1877 * it at least MSS bytes. 1878 * 1879 * Unfortunately, the recommended algorithm breaks header prediction, 1880 * since header prediction assumes th->window stays fixed. 1881 * 1882 * Strictly speaking, keeping th->window fixed violates the receiver 1883 * side SWS prevention criteria. The problem is that under this rule 1884 * a stream of single byte packets will cause the right side of the 1885 * window to always advance by a single byte. 1886 * 1887 * Of course, if the sender implements sender side SWS prevention 1888 * then this will not be a problem. 1889 * 1890 * BSD seems to make the following compromise: 1891 * 1892 * If the free space is less than the 1/4 of the maximum 1893 * space available and the free space is less than 1/2 mss, 1894 * then set the window to 0. 1895 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 1896 * Otherwise, just prevent the window from shrinking 1897 * and from being larger than the largest representable value. 1898 * 1899 * This prevents incremental opening of the window in the regime 1900 * where TCP is limited by the speed of the reader side taking 1901 * data out of the TCP receive queue. It does nothing about 1902 * those cases where the window is constrained on the sender side 1903 * because the pipeline is full. 1904 * 1905 * BSD also seems to "accidentally" limit itself to windows that are a 1906 * multiple of MSS, at least until the free space gets quite small. 1907 * This would appear to be a side effect of the mbuf implementation. 1908 * Combining these two algorithms results in the observed behavior 1909 * of having a fixed window size at almost all times. 1910 * 1911 * Below we obtain similar behavior by forcing the offered window to 1912 * a multiple of the mss when it is feasible to do so. 1913 * 1914 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 1915 * Regular options like TIMESTAMP are taken into account. 1916 */ 1917u32 __tcp_select_window(struct sock *sk) 1918{ 1919 struct inet_connection_sock *icsk = inet_csk(sk); 1920 struct tcp_sock *tp = tcp_sk(sk); 1921 /* MSS for the peer's data. Previous versions used mss_clamp 1922 * here. I don't know if the value based on our guesses 1923 * of peer's MSS is better for the performance. It's more correct 1924 * but may be worse for the performance because of rcv_mss 1925 * fluctuations. --SAW 1998/11/1 1926 */ 1927 int mss = icsk->icsk_ack.rcv_mss; 1928 int free_space = tcp_space(sk); 1929 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1930 int window; 1931 1932 if (mss > full_space) 1933 mss = full_space; 1934 1935 if (free_space < (full_space >> 1)) { 1936 icsk->icsk_ack.quick = 0; 1937 1938 if (sk_under_memory_pressure(sk)) 1939 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 1940 4U * tp->advmss); 1941 1942 if (free_space < mss) 1943 return 0; 1944 } 1945 1946 if (free_space > tp->rcv_ssthresh) 1947 free_space = tp->rcv_ssthresh; 1948 1949 /* Don't do rounding if we are using window scaling, since the 1950 * scaled window will not line up with the MSS boundary anyway. 1951 */ 1952 window = tp->rcv_wnd; 1953 if (tp->rx_opt.rcv_wscale) { 1954 window = free_space; 1955 1956 /* Advertise enough space so that it won't get scaled away. 1957 * Import case: prevent zero window announcement if 1958 * 1<<rcv_wscale > mss. 
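 *
 * For illustration: with rcv_wscale of 7 the advertised field is in units
 * of 128 bytes, so a free_space of 100 bytes would be announced as
 * 100 >> 7 = 0, i.e. a zero window.  The rounding below therefore bumps
 * any unaligned value up to the next multiple of 1<<rcv_wscale, here
 * (0 + 1) << 7 = 128 bytes.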
1959 */ 1960 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 1961 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 1962 << tp->rx_opt.rcv_wscale); 1963 } else { 1964 /* Get the largest window that is a nice multiple of mss. 1965 * Window clamp already applied above. 1966 * If our current window offering is within 1 mss of the 1967 * free space we just keep it. This prevents the divide 1968 * and multiply from happening most of the time. 1969 * We also don't do any window rounding when the free space 1970 * is too small. 1971 */ 1972 if (window <= free_space - mss || window > free_space) 1973 window = (free_space / mss) * mss; 1974 else if (mss == full_space && 1975 free_space > window + (full_space >> 1)) 1976 window = free_space; 1977 } 1978 1979 return window; 1980} 1981 1982/* Collapses two adjacent SKB's during retransmission. */ 1983static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 1984{ 1985 struct tcp_sock *tp = tcp_sk(sk); 1986 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 1987 int skb_size, next_skb_size; 1988 1989 skb_size = skb->len; 1990 next_skb_size = next_skb->len; 1991 1992 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 1993 1994 tcp_highest_sack_combine(sk, next_skb, skb); 1995 1996 tcp_unlink_write_queue(next_skb, sk); 1997 1998 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 1999 next_skb_size); 2000 2001 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 2002 skb->ip_summed = CHECKSUM_PARTIAL; 2003 2004 if (skb->ip_summed != CHECKSUM_PARTIAL) 2005 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 2006 2007 /* Update sequence range on original skb. */ 2008 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 2009 2010 /* Merge over control information. This moves PSH/FIN etc. over */ 2011 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 2012 2013 /* All done, get rid of second SKB and account for it so 2014 * packet counting does not break. 2015 */ 2016 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 2017 2018 /* changed transmit queue under us so clear hints */ 2019 tcp_clear_retrans_hints_partial(tp); 2020 if (next_skb == tp->retransmit_skb_hint) 2021 tp->retransmit_skb_hint = skb; 2022 2023 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2024 2025 sk_wmem_free_skb(sk, next_skb); 2026} 2027 2028/* Check if coalescing SKBs is legal. */ 2029static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2030{ 2031 if (tcp_skb_pcount(skb) > 1) 2032 return 0; 2033 /* TODO: SACK collapsing could be used to remove this condition */ 2034 if (skb_shinfo(skb)->nr_frags != 0) 2035 return 0; 2036 if (skb_cloned(skb)) 2037 return 0; 2038 if (skb == tcp_send_head(sk)) 2039 return 0; 2040 /* Some heurestics for collapsing over SACK'd could be invented */ 2041 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2042 return 0; 2043 2044 return 1; 2045} 2046 2047/* Collapse packets in the retransmit queue to make to create 2048 * less packets on the wire. This is only done on retransmission. 
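 *
 * "space" is the byte budget for the merged skb (the caller passes the
 * current mss).  The walk below counts the first skb, the one collapsed
 * into, against that budget but never merges it with itself, and it stops
 * as soon as the budget is exhausted, the next skb no longer fits in
 * skb_availroom() of the target, or the data would run past the send
 * window.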
2049 */ 2050static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 2051 int space) 2052{ 2053 struct tcp_sock *tp = tcp_sk(sk); 2054 struct sk_buff *skb = to, *tmp; 2055 int first = 1; 2056 2057 if (!sysctl_tcp_retrans_collapse) 2058 return; 2059 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2060 return; 2061 2062 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2063 if (!tcp_can_collapse(sk, skb)) 2064 break; 2065 2066 space -= skb->len; 2067 2068 if (first) { 2069 first = 0; 2070 continue; 2071 } 2072 2073 if (space < 0) 2074 break; 2075 /* Punt if not enough space exists in the first SKB for 2076 * the data in the second 2077 */ 2078 if (skb->len > skb_availroom(to)) 2079 break; 2080 2081 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2082 break; 2083 2084 tcp_collapse_retrans(sk, to); 2085 } 2086} 2087 2088/* This retransmits one SKB. Policy decisions and retransmit queue 2089 * state updates are done by the caller. Returns non-zero if an 2090 * error occurred which prevented the send. 2091 */ 2092int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 2093{ 2094 struct tcp_sock *tp = tcp_sk(sk); 2095 struct inet_connection_sock *icsk = inet_csk(sk); 2096 unsigned int cur_mss; 2097 int err; 2098 2099 /* Inconslusive MTU probe */ 2100 if (icsk->icsk_mtup.probe_size) { 2101 icsk->icsk_mtup.probe_size = 0; 2102 } 2103 2104 /* Do not sent more than we queued. 1/4 is reserved for possible 2105 * copying overhead: fragmentation, tunneling, mangling etc. 2106 */ 2107 if (atomic_read(&sk->sk_wmem_alloc) > 2108 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2109 return -EAGAIN; 2110 2111 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2112 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2113 BUG(); 2114 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2115 return -ENOMEM; 2116 } 2117 2118 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 2119 return -EHOSTUNREACH; /* Routing failure or similar. */ 2120 2121 cur_mss = tcp_current_mss(sk); 2122 2123 /* If receiver has shrunk his window, and skb is out of 2124 * new window, do not retransmit it. The exception is the 2125 * case, when window is shrunk to zero. In this case 2126 * our retransmit serves as a zero window probe. 2127 */ 2128 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 2129 TCP_SKB_CB(skb)->seq != tp->snd_una) 2130 return -EAGAIN; 2131 2132 if (skb->len > cur_mss) { 2133 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 2134 return -ENOMEM; /* We'll try again later. */ 2135 } else { 2136 int oldpcount = tcp_skb_pcount(skb); 2137 2138 if (unlikely(oldpcount > 1)) { 2139 tcp_init_tso_segs(sk, skb, cur_mss); 2140 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2141 } 2142 } 2143 2144 tcp_retrans_try_collapse(sk, skb, cur_mss); 2145 2146 /* Some Solaris stacks overoptimize and ignore the FIN on a 2147 * retransmit when old data is attached. So strip it off 2148 * since it is cheap to do so and saves bytes on the network. 2149 */ 2150 if (skb->len > 0 && 2151 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 2152 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 2153 if (!pskb_trim(skb, 0)) { 2154 /* Reuse, even though it does some unnecessary work */ 2155 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 2156 TCP_SKB_CB(skb)->tcp_flags); 2157 skb->ip_summed = CHECKSUM_NONE; 2158 } 2159 } 2160 2161 /* Make a copy, if the first transmission SKB clone we made 2162 * is still in somebody's hands, else make a clone. 
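 *
 * On arches with a non-zero NET_IP_ALIGN the block below additionally
 * makes a fresh copy via __pskb_copy() whenever skb->data is not 4-byte
 * aligned, because such arches require aligned skb data.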
2163 */ 2164 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2165 2166 /* make sure skb->data is aligned on arches that require it */ 2167 if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { 2168 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2169 GFP_ATOMIC); 2170 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2171 -ENOBUFS; 2172 } else { 2173 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2174 } 2175 2176 if (err == 0) { 2177 /* Update global TCP statistics. */ 2178 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 2179 2180 tp->total_retrans++; 2181 2182#if FASTRETRANS_DEBUG > 0 2183 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2184 if (net_ratelimit()) 2185 printk(KERN_DEBUG "retrans_out leaked.\n"); 2186 } 2187#endif 2188 if (!tp->retrans_out) 2189 tp->lost_retrans_low = tp->snd_nxt; 2190 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2191 tp->retrans_out += tcp_skb_pcount(skb); 2192 2193 /* Save stamp of the first retransmit. */ 2194 if (!tp->retrans_stamp) 2195 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2196 2197 tp->undo_retrans += tcp_skb_pcount(skb); 2198 2199 /* snd_nxt is stored to detect loss of retransmitted segment, 2200 * see tcp_input.c tcp_sacktag_write_queue(). 2201 */ 2202 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2203 } 2204 return err; 2205} 2206 2207/* Check if we forward retransmits are possible in the current 2208 * window/congestion state. 2209 */ 2210static int tcp_can_forward_retransmit(struct sock *sk) 2211{ 2212 const struct inet_connection_sock *icsk = inet_csk(sk); 2213 const struct tcp_sock *tp = tcp_sk(sk); 2214 2215 /* Forward retransmissions are possible only during Recovery. */ 2216 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2217 return 0; 2218 2219 /* No forward retransmissions in Reno are possible. */ 2220 if (tcp_is_reno(tp)) 2221 return 0; 2222 2223 /* Yeah, we have to make difficult choice between forward transmission 2224 * and retransmission... Both ways have their merits... 2225 * 2226 * For now we do not retransmit anything, while we have some new 2227 * segments to send. In the other cases, follow rule 3 for 2228 * NextSeg() specified in RFC3517. 2229 */ 2230 2231 if (tcp_may_send_now(sk)) 2232 return 0; 2233 2234 return 1; 2235} 2236 2237/* This gets called after a retransmit timeout, and the initially 2238 * retransmitted data is acknowledged. It tries to continue 2239 * resending the rest of the retransmit queue, until either 2240 * we've sent it all or the congestion window limit is reached. 2241 * If doing SACK, the first ACK which comes back for a timeout 2242 * based retransmit packet might feed us FACK information again. 2243 * If so, we use it to avoid unnecessarily retransmissions. 
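 *
 * The queue walk below runs in two stages: first the segments marked lost,
 * from retransmit_skb_hint (or the head of the write queue) up to
 * retransmit_high, while remembering the first skb that is neither lost
 * nor SACKed as a "hole".  Once the lost range is exhausted, and only if
 * tcp_can_forward_retransmit() allows it, the walk backs up to that hole
 * and forward-retransmits un-SACKed data below the highest SACKed
 * sequence.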
2244 */ 2245void tcp_xmit_retransmit_queue(struct sock *sk) 2246{ 2247 const struct inet_connection_sock *icsk = inet_csk(sk); 2248 struct tcp_sock *tp = tcp_sk(sk); 2249 struct sk_buff *skb; 2250 struct sk_buff *hole = NULL; 2251 u32 last_lost; 2252 int mib_idx; 2253 int fwd_rexmitting = 0; 2254 2255 if (!tp->packets_out) 2256 return; 2257 2258 if (!tp->lost_out) 2259 tp->retransmit_high = tp->snd_una; 2260 2261 if (tp->retransmit_skb_hint) { 2262 skb = tp->retransmit_skb_hint; 2263 last_lost = TCP_SKB_CB(skb)->end_seq; 2264 if (after(last_lost, tp->retransmit_high)) 2265 last_lost = tp->retransmit_high; 2266 } else { 2267 skb = tcp_write_queue_head(sk); 2268 last_lost = tp->snd_una; 2269 } 2270 2271 tcp_for_write_queue_from(skb, sk) { 2272 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2273 2274 if (skb == tcp_send_head(sk)) 2275 break; 2276 /* we could do better than to assign each time */ 2277 if (hole == NULL) 2278 tp->retransmit_skb_hint = skb; 2279 2280 /* Assume this retransmit will generate 2281 * only one packet for congestion window 2282 * calculation purposes. This works because 2283 * tcp_retransmit_skb() will chop up the 2284 * packet to be MSS sized and all the 2285 * packet counting works out. 2286 */ 2287 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2288 return; 2289 2290 if (fwd_rexmitting) { 2291begin_fwd: 2292 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2293 break; 2294 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2295 2296 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2297 tp->retransmit_high = last_lost; 2298 if (!tcp_can_forward_retransmit(sk)) 2299 break; 2300 /* Backtrack if necessary to non-L'ed skb */ 2301 if (hole != NULL) { 2302 skb = hole; 2303 hole = NULL; 2304 } 2305 fwd_rexmitting = 1; 2306 goto begin_fwd; 2307 2308 } else if (!(sacked & TCPCB_LOST)) { 2309 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2310 hole = skb; 2311 continue; 2312 2313 } else { 2314 last_lost = TCP_SKB_CB(skb)->end_seq; 2315 if (icsk->icsk_ca_state != TCP_CA_Loss) 2316 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2317 else 2318 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2319 } 2320 2321 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2322 continue; 2323 2324 if (tcp_retransmit_skb(sk, skb)) { 2325 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2326 return; 2327 } 2328 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2329 2330 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 2331 tp->prr_out += tcp_skb_pcount(skb); 2332 2333 if (skb == tcp_write_queue_head(sk)) 2334 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2335 inet_csk(sk)->icsk_rto, 2336 TCP_RTO_MAX); 2337 } 2338} 2339 2340/* Send a fin. The caller locks the socket for us. This cannot be 2341 * allowed to fail queueing a FIN frame under any circumstances. 2342 */ 2343void tcp_send_fin(struct sock *sk) 2344{ 2345 struct tcp_sock *tp = tcp_sk(sk); 2346 struct sk_buff *skb = tcp_write_queue_tail(sk); 2347 int mss_now; 2348 2349 /* Optimization, tack on the FIN if we have a queue of 2350 * unsent frames. But be careful about outgoing SACKS 2351 * and IP options. 2352 */ 2353 mss_now = tcp_current_mss(sk); 2354 2355 if (tcp_send_head(sk) != NULL) { 2356 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 2357 TCP_SKB_CB(skb)->end_seq++; 2358 tp->write_seq++; 2359 } else { 2360 /* Socket is locked, keep trying until memory is available. 
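 * A FIN must never be dropped for lack of memory, so the loop below
 * simply retries the allocation, yield()ing between attempts.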
*/ 2361 for (;;) { 2362 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2363 sk->sk_allocation); 2364 if (skb) 2365 break; 2366 yield(); 2367 } 2368 2369 /* Reserve space for headers and prepare control bits. */ 2370 skb_reserve(skb, MAX_TCP_HEADER); 2371 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2372 tcp_init_nondata_skb(skb, tp->write_seq, 2373 TCPHDR_ACK | TCPHDR_FIN); 2374 tcp_queue_skb(sk, skb); 2375 } 2376 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2377} 2378 2379/* We get here when a process closes a file descriptor (either due to 2380 * an explicit close() or as a byproduct of exit()'ing) and there 2381 * was unread data in the receive queue. This behavior is recommended 2382 * by RFC 2525, section 2.17. -DaveM 2383 */ 2384void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2385{ 2386 struct sk_buff *skb; 2387 2388 /* NOTE: No TCP options attached and we never retransmit this. */ 2389 skb = alloc_skb(MAX_TCP_HEADER, priority); 2390 if (!skb) { 2391 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2392 return; 2393 } 2394 2395 /* Reserve space for headers and prepare control bits. */ 2396 skb_reserve(skb, MAX_TCP_HEADER); 2397 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2398 TCPHDR_ACK | TCPHDR_RST); 2399 /* Send it off. */ 2400 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2401 if (tcp_transmit_skb(sk, skb, 0, priority)) 2402 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2403 2404 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2405} 2406 2407/* Send a crossed SYN-ACK during socket establishment. 2408 * WARNING: This routine must only be called when we have already sent 2409 * a SYN packet that crossed the incoming SYN that caused this routine 2410 * to get called. If this assumption fails then the initial rcv_wnd 2411 * and rcv_wscale values will not be correct. 2412 */ 2413int tcp_send_synack(struct sock *sk) 2414{ 2415 struct sk_buff *skb; 2416 2417 skb = tcp_write_queue_head(sk); 2418 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2419 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2420 return -EFAULT; 2421 } 2422 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2423 if (skb_cloned(skb)) { 2424 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2425 if (nskb == NULL) 2426 return -ENOMEM; 2427 tcp_unlink_write_queue(skb, sk); 2428 skb_header_release(nskb); 2429 __tcp_add_write_queue_head(sk, nskb); 2430 sk_wmem_free_skb(sk, skb); 2431 sk->sk_wmem_queued += nskb->truesize; 2432 sk_mem_charge(sk, nskb->truesize); 2433 skb = nskb; 2434 } 2435 2436 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2437 TCP_ECN_send_synack(tcp_sk(sk), skb); 2438 } 2439 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2440 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2441} 2442 2443/* Prepare a SYN-ACK. 
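 * Returns the new skb, or NULL if the allocation fails.  The caller owns
 * the skb and is expected to transmit and then free it, e.g. from the
 * address-family specific SYN-ACK transmit path.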
*/ 2444struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2445 struct request_sock *req, 2446 struct request_values *rvp) 2447{ 2448 struct tcp_out_options opts; 2449 struct tcp_extend_values *xvp = tcp_xv(rvp); 2450 struct inet_request_sock *ireq = inet_rsk(req); 2451 struct tcp_sock *tp = tcp_sk(sk); 2452 const struct tcp_cookie_values *cvp = tp->cookie_values; 2453 struct tcphdr *th; 2454 struct sk_buff *skb; 2455 struct tcp_md5sig_key *md5; 2456 int tcp_header_size; 2457 int mss; 2458 int s_data_desired = 0; 2459 2460 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) 2461 s_data_desired = cvp->s_data_desired; 2462 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); 2463 if (skb == NULL) 2464 return NULL; 2465 2466 /* Reserve space for headers. */ 2467 skb_reserve(skb, MAX_TCP_HEADER); 2468 2469 skb_dst_set(skb, dst_clone(dst)); 2470 2471 mss = dst_metric_advmss(dst); 2472 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2473 mss = tp->rx_opt.user_mss; 2474 2475 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2476 __u8 rcv_wscale; 2477 /* Set this up on the first call only */ 2478 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2479 2480 /* limit the window selection if the user enforce a smaller rx buffer */ 2481 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2482 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) 2483 req->window_clamp = tcp_full_space(sk); 2484 2485 /* tcp_full_space because it is guaranteed to be the first packet */ 2486 tcp_select_initial_window(tcp_full_space(sk), 2487 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2488 &req->rcv_wnd, 2489 &req->window_clamp, 2490 ireq->wscale_ok, 2491 &rcv_wscale, 2492 dst_metric(dst, RTAX_INITRWND)); 2493 ireq->rcv_wscale = rcv_wscale; 2494 } 2495 2496 memset(&opts, 0, sizeof(opts)); 2497#ifdef CONFIG_SYN_COOKIES 2498 if (unlikely(req->cookie_ts)) 2499 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 2500 else 2501#endif 2502 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2503 tcp_header_size = tcp_synack_options(sk, req, mss, 2504 skb, &opts, &md5, xvp) 2505 + sizeof(*th); 2506 2507 skb_push(skb, tcp_header_size); 2508 skb_reset_transport_header(skb); 2509 2510 th = tcp_hdr(skb); 2511 memset(th, 0, sizeof(struct tcphdr)); 2512 th->syn = 1; 2513 th->ack = 1; 2514 TCP_ECN_make_synack(req, th); 2515 th->source = ireq->loc_port; 2516 th->dest = ireq->rmt_port; 2517 /* Setting of flags are superfluous here for callers (and ECE is 2518 * not even correctly set) 2519 */ 2520 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2521 TCPHDR_SYN | TCPHDR_ACK); 2522 2523 if (OPTION_COOKIE_EXTENSION & opts.options) { 2524 if (s_data_desired) { 2525 u8 *buf = skb_put(skb, s_data_desired); 2526 2527 /* copy data directly from the listening socket. */ 2528 memcpy(buf, cvp->s_data_payload, s_data_desired); 2529 TCP_SKB_CB(skb)->end_seq += s_data_desired; 2530 } 2531 2532 if (opts.hash_size > 0) { 2533 __u32 workspace[SHA_WORKSPACE_WORDS]; 2534 u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS]; 2535 u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1]; 2536 2537 /* Secret recipe depends on the Timestamp, (future) 2538 * Sequence and Acknowledgment Numbers, Initiator 2539 * Cookie, and others handled by IP variant caller. 
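 *
 * Concretely, the values below are XORed into the tail of the
 * cookie_bakery message buffer and a single sha_transform() pass over
 * that buffer produces the digest that opts.hash_location is left
 * pointing at.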
2540 */ 2541 *tail-- ^= opts.tsval; 2542 *tail-- ^= tcp_rsk(req)->rcv_isn + 1; 2543 *tail-- ^= TCP_SKB_CB(skb)->seq + 1; 2544 2545 /* recommended */ 2546 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); 2547 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ 2548 2549 sha_transform((__u32 *)&xvp->cookie_bakery[0], 2550 (char *)mess, 2551 &workspace[0]); 2552 opts.hash_location = 2553 (__u8 *)&xvp->cookie_bakery[0]; 2554 } 2555 } 2556 2557 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2558 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2559 2560 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2561 th->window = htons(min(req->rcv_wnd, 65535U)); 2562 tcp_options_write((__be32 *)(th + 1), tp, &opts); 2563 th->doff = (tcp_header_size >> 2); 2564 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); 2565 2566#ifdef CONFIG_TCP_MD5SIG 2567 /* Okay, we have all we need - do the md5 hash if needed */ 2568 if (md5) { 2569 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 2570 md5, NULL, req, skb); 2571 } 2572#endif 2573 2574 return skb; 2575} 2576EXPORT_SYMBOL(tcp_make_synack); 2577 2578/* Do all connect socket setups that can be done AF independent. */ 2579void tcp_connect_init(struct sock *sk) 2580{ 2581 const struct dst_entry *dst = __sk_dst_get(sk); 2582 struct tcp_sock *tp = tcp_sk(sk); 2583 __u8 rcv_wscale; 2584 2585 /* We'll fix this up when we get a response from the other end. 2586 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2587 */ 2588 tp->tcp_header_len = sizeof(struct tcphdr) + 2589 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 2590 2591#ifdef CONFIG_TCP_MD5SIG 2592 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2593 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2594#endif 2595 2596 /* If user gave his TCP_MAXSEG, record it to clamp */ 2597 if (tp->rx_opt.user_mss) 2598 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2599 tp->max_window = 0; 2600 tcp_mtup_init(sk); 2601 tcp_sync_mss(sk, dst_mtu(dst)); 2602 2603 if (!tp->window_clamp) 2604 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2605 tp->advmss = dst_metric_advmss(dst); 2606 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2607 tp->advmss = tp->rx_opt.user_mss; 2608 2609 tcp_initialize_rcv_mss(sk); 2610 2611 /* limit the window selection if the user enforce a smaller rx buffer */ 2612 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2613 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 2614 tp->window_clamp = tcp_full_space(sk); 2615 2616 tcp_select_initial_window(tcp_full_space(sk), 2617 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2618 &tp->rcv_wnd, 2619 &tp->window_clamp, 2620 sysctl_tcp_window_scaling, 2621 &rcv_wscale, 2622 dst_metric(dst, RTAX_INITRWND)); 2623 2624 tp->rx_opt.rcv_wscale = rcv_wscale; 2625 tp->rcv_ssthresh = tp->rcv_wnd; 2626 2627 sk->sk_err = 0; 2628 sock_reset_flag(sk, SOCK_DONE); 2629 tp->snd_wnd = 0; 2630 tcp_init_wl(tp, 0); 2631 tp->snd_una = tp->write_seq; 2632 tp->snd_sml = tp->write_seq; 2633 tp->snd_up = tp->write_seq; 2634 tp->snd_nxt = tp->write_seq; 2635 2636 if (likely(!tp->repair)) 2637 tp->rcv_nxt = 0; 2638 tp->rcv_wup = tp->rcv_nxt; 2639 tp->copied_seq = tp->rcv_nxt; 2640 2641 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2642 inet_csk(sk)->icsk_retransmits = 0; 2643 tcp_clear_retrans(tp); 2644} 2645 2646/* Build a SYN and send it off. 
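 * Returns 0 once the SYN has been queued and the retransmit timer armed,
 * -ENOBUFS if the skb cannot be allocated, or -ECONNREFUSED when the
 * transmit reports that error; any other transmit failure is left for the
 * retransmit timer to recover from.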
*/ 2647int tcp_connect(struct sock *sk) 2648{ 2649 struct tcp_sock *tp = tcp_sk(sk); 2650 struct sk_buff *buff; 2651 int err; 2652 2653 tcp_connect_init(sk); 2654 2655 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2656 if (unlikely(buff == NULL)) 2657 return -ENOBUFS; 2658 2659 /* Reserve space for headers. */ 2660 skb_reserve(buff, MAX_TCP_HEADER); 2661 2662 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2663 TCP_ECN_send_syn(sk, buff); 2664 2665 /* Send it off. */ 2666 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2667 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2668 skb_header_release(buff); 2669 __tcp_add_write_queue_tail(sk, buff); 2670 sk->sk_wmem_queued += buff->truesize; 2671 sk_mem_charge(sk, buff->truesize); 2672 tp->packets_out += tcp_skb_pcount(buff); 2673 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2674 if (err == -ECONNREFUSED) 2675 return err; 2676 2677 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2678 * in order to make this packet get counted in tcpOutSegs. 2679 */ 2680 tp->snd_nxt = tp->write_seq; 2681 tp->pushed_seq = tp->write_seq; 2682 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 2683 2684 /* Timer for repeating the SYN until an answer. */ 2685 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2686 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2687 return 0; 2688} 2689EXPORT_SYMBOL(tcp_connect); 2690 2691/* Send out a delayed ack, the caller does the policy checking 2692 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2693 * for details. 2694 */ 2695void tcp_send_delayed_ack(struct sock *sk) 2696{ 2697 struct inet_connection_sock *icsk = inet_csk(sk); 2698 int ato = icsk->icsk_ack.ato; 2699 unsigned long timeout; 2700 2701 if (ato > TCP_DELACK_MIN) { 2702 const struct tcp_sock *tp = tcp_sk(sk); 2703 int max_ato = HZ / 2; 2704 2705 if (icsk->icsk_ack.pingpong || 2706 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2707 max_ato = TCP_DELACK_MAX; 2708 2709 /* Slow path, intersegment interval is "high". */ 2710 2711 /* If some rtt estimate is known, use it to bound delayed ack. 2712 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2713 * directly. 2714 */ 2715 if (tp->srtt) { 2716 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 2717 2718 if (rtt < max_ato) 2719 max_ato = rtt; 2720 } 2721 2722 ato = min(ato, max_ato); 2723 } 2724 2725 /* Stay within the limit we were given */ 2726 timeout = jiffies + ato; 2727 2728 /* Use new timeout only if there wasn't a older one earlier. */ 2729 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 2730 /* If delack timer was blocked or is about to expire, 2731 * send ACK now. 2732 */ 2733 if (icsk->icsk_ack.blocked || 2734 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 2735 tcp_send_ack(sk); 2736 return; 2737 } 2738 2739 if (!time_before(timeout, icsk->icsk_ack.timeout)) 2740 timeout = icsk->icsk_ack.timeout; 2741 } 2742 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2743 icsk->icsk_ack.timeout = timeout; 2744 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 2745} 2746 2747/* This routine sends an ack and also updates the window. */ 2748void tcp_send_ack(struct sock *sk) 2749{ 2750 struct sk_buff *buff; 2751 2752 /* If we have been reset, we may not send again. */ 2753 if (sk->sk_state == TCP_CLOSE) 2754 return; 2755 2756 /* We are not putting this on the write queue, so 2757 * tcp_transmit_skb() will set the ownership to this 2758 * sock. 
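 * If even this small allocation fails we do not give up on the ACK;
 * the delayed-ACK timer is (re)armed below so that it is retried later.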
2759 */ 2760 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2761 if (buff == NULL) { 2762 inet_csk_schedule_ack(sk); 2763 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 2764 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 2765 TCP_DELACK_MAX, TCP_RTO_MAX); 2766 return; 2767 } 2768 2769 /* Reserve space for headers and prepare control bits. */ 2770 skb_reserve(buff, MAX_TCP_HEADER); 2771 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 2772 2773 /* Send it off, this clears delayed acks for us. */ 2774 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2775 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 2776} 2777 2778/* This routine sends a packet with an out of date sequence 2779 * number. It assumes the other end will try to ack it. 2780 * 2781 * Question: what should we make while urgent mode? 2782 * 4.4BSD forces sending single byte of data. We cannot send 2783 * out of window data, because we have SND.NXT==SND.MAX... 2784 * 2785 * Current solution: to send TWO zero-length segments in urgent mode: 2786 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 2787 * out-of-date with SND.UNA-1 to probe window. 2788 */ 2789static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 2790{ 2791 struct tcp_sock *tp = tcp_sk(sk); 2792 struct sk_buff *skb; 2793 2794 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2795 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2796 if (skb == NULL) 2797 return -1; 2798 2799 /* Reserve space for headers and set control bits. */ 2800 skb_reserve(skb, MAX_TCP_HEADER); 2801 /* Use a previous sequence. This should cause the other 2802 * end to send an ack. Don't queue or clone SKB, just 2803 * send it. 2804 */ 2805 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 2806 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2807 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2808} 2809 2810void tcp_send_window_probe(struct sock *sk) 2811{ 2812 if (sk->sk_state == TCP_ESTABLISHED) { 2813 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 2814 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; 2815 tcp_xmit_probe_skb(sk, 0); 2816 } 2817} 2818 2819/* Initiate keepalive or window probe from timer. 
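 * Return value convention, as interpreted by tcp_send_probe0() below:
 * negative means no probe could be built or sent at all, zero means a
 * segment went out, and a positive value means the packet was dropped
 * locally (local congestion), in which case the caller does not back off.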
*/ 2820int tcp_write_wakeup(struct sock *sk) 2821{ 2822 struct tcp_sock *tp = tcp_sk(sk); 2823 struct sk_buff *skb; 2824 2825 if (sk->sk_state == TCP_CLOSE) 2826 return -1; 2827 2828 if ((skb = tcp_send_head(sk)) != NULL && 2829 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 2830 int err; 2831 unsigned int mss = tcp_current_mss(sk); 2832 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2833 2834 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2835 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2836 2837 /* We are probing the opening of a window 2838 * but the window size is != 0 2839 * must have been a result SWS avoidance ( sender ) 2840 */ 2841 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2842 skb->len > mss) { 2843 seg_size = min(seg_size, mss); 2844 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 2845 if (tcp_fragment(sk, skb, seg_size, mss)) 2846 return -1; 2847 } else if (!tcp_skb_pcount(skb)) 2848 tcp_set_skb_tso_segs(sk, skb, mss); 2849 2850 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 2851 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2852 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2853 if (!err) 2854 tcp_event_new_data_sent(sk, skb); 2855 return err; 2856 } else { 2857 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 2858 tcp_xmit_probe_skb(sk, 1); 2859 return tcp_xmit_probe_skb(sk, 0); 2860 } 2861} 2862 2863/* A window probe timeout has occurred. If window is not closed send 2864 * a partial packet else a zero probe. 2865 */ 2866void tcp_send_probe0(struct sock *sk) 2867{ 2868 struct inet_connection_sock *icsk = inet_csk(sk); 2869 struct tcp_sock *tp = tcp_sk(sk); 2870 int err; 2871 2872 err = tcp_write_wakeup(sk); 2873 2874 if (tp->packets_out || !tcp_send_head(sk)) { 2875 /* Cancel probe timer, if it is not required. */ 2876 icsk->icsk_probes_out = 0; 2877 icsk->icsk_backoff = 0; 2878 return; 2879 } 2880 2881 if (err <= 0) { 2882 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2883 icsk->icsk_backoff++; 2884 icsk->icsk_probes_out++; 2885 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2886 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2887 TCP_RTO_MAX); 2888 } else { 2889 /* If packet was not sent due to local congestion, 2890 * do not backoff and do not remember icsk_probes_out. 2891 * Let local senders to fight for local resources. 2892 * 2893 * Use accumulated backoff yet. 2894 */ 2895 if (!icsk->icsk_probes_out) 2896 icsk->icsk_probes_out = 1; 2897 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2898 min(icsk->icsk_rto << icsk->icsk_backoff, 2899 TCP_RESOURCE_PROBE_INTERVAL), 2900 TCP_RTO_MAX); 2901 } 2902} 2903
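/* Illustrative sketch only, not part of the upstream file: the zero window
 * probe timer armed by tcp_send_probe0() above grows exponentially with
 * icsk_backoff and is clamped to TCP_RTO_MAX.  The helper below merely
 * restates that arithmetic; its name is made up for illustration and the
 * block is compiled out.
 */
#if 0
static unsigned long example_probe0_timeout(const struct inet_connection_sock *icsk)
{
	/* e.g. icsk_rto = 200 ms and icsk_backoff = 3 gives 1.6 s,
	 * capped at TCP_RTO_MAX (120 seconds).
	 */
	return min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX);
}
#endif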