tcp_output.c revision b08d6cb22c777c8c91c16d8e3b8aafc93c98cbd9
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static inline void tcp_packets_out_inc(struct sock *sk,
				       const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static void update_send_head(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 * This is the first part of cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to value enough for senders,
	 * following RFC2414. Senders, not following this RFC,
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
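
/* Example of the scaling choice above: if the largest receive buffer
 * (tcp_rmem[2] or rmem_max) allows roughly 4 MB of window, the loop
 * halves it seven times before it fits in 16 bits, so rcv_wscale ends
 * up as 7.  The RFC2414 clamp then limits the initial advertisement to
 * 2-4 segments (depending on mss), regardless of how large the scaled
 * window may grow later.
 */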

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
				       struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
	if (!(tp->ecn_flags&TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp, __u8 **md5_hash)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) |
			       TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
#ifdef CONFIG_TCP_MD5SIG
	if (md5_hash) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *)ptr;
	}
#endif
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 *
 * Note - that with the RFC2385 TCP option, we make room for the
 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
 * location to be filled is passed back up.
 */
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent, __u8 **md5_hash)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		else
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       (wscale));
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * If MD5 is enabled, then we set the option, and include the size
	 * (always 18). The actual MD5 hash is added just before the
	 * packet is sent.
	 */
	if (md5_hash) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *) ptr;
	}
#endif
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *md5;
	__u8 *md5_hash_location;
#endif
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * Are we doing MD5 on this segment? If so - make
	 * room for it.
	 */
	md5 = tp->af_specific->md5_lookup(sk, sk);
	if (md5)
		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source	= inet->sport;
	th->dest	= inet->dport;
	th->seq		= htonl(tcb->seq);
	th->ack_seq	= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
				      tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check	= 0;
	th->urg_ptr	= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr	= htons(tp->snd_up-tcb->seq);
		th->urg		= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__be32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent,

#ifdef CONFIG_TCP_MD5SIG
				      md5 ? &md5_hash_location :
#endif
				      NULL);
	} else {
		tcp_build_and_update_options((__be32 *)(th + 1),
					     tp, tcb->when,
#ifdef CONFIG_TCP_MD5SIG
					     md5 ? &md5_hash_location :
#endif
					     NULL);
		TCP_ECN_send(sk, skb, tcp_header_size);
	}

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		tp->af_specific->calc_md5_hash(md5_hash_location,
					       md5,
					       sk, NULL, NULL,
					       tcp_hdr(skb),
					       sk->sk_protocol,
					       skb->len);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
}

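/* Illustration of the bookkeeping done below: a 7300 byte skb sent with
 * an mss of 1460 is counted as DIV_ROUND_UP(7300, 1460) = 5 segments,
 * with gso_size set to the mss so the NIC (or software GSO) can slice it.
 * A sub-mss skb, or a socket that cannot do GSO, stays a single segment
 * with gso_size 0.
 */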
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets out becomes necessary, we need to check
 * skb is counted to fackets_out or not. Another important thing is to
 * tweak SACK fastpath hint too as it would overwrite all changes unless
 * hint is also changed.
 */
static void tcp_adjust_fackets_out(struct tcp_sock *tp, struct sk_buff *skb,
				   int decr)
{
	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (!before(tp->highest_sack, TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;

	/* cnt_hint is "off-by-one" compared with fackets_out (see sacktag) */
	if (tp->fastpath_skb_hint != NULL &&
	    after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
		tp->fastpath_cnt_hint -= decr;
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u16 flags;

	BUG_ON(len > skb->len);

	tcp_clear_retrans_hints_partial(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk_charge_skb(sk, buff);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	if (tcp_is_sack(tp) && tp->sacked_out &&
	    (TCP_SKB_CB(skb)->seq == tp->highest_sack))
		tp->highest_sack = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses when of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			tp->lost_out -= diff;

		/* Adjust Reno SACK estimate. */
		if (tcp_is_reno(tp) && diff > 0) {
			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
			tcp_verify_left_out(tp);
		}
		tcp_adjust_fackets_out(tp, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

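/* For orientation: on a plain IPv4 path with a 1500 byte MTU and no
 * extension headers, tcp_mtu_to_mss() below gives 1500 - 20 - 20 = 1460;
 * with timestamps negotiated, tcp_header_len grows by 12 bytes, so the
 * per-segment payload drops to 1448.  tcp_mss_to_mtu() is the exact
 * inverse and is used by MTU probing to turn a target mss back into a
 * path MTU estimate.
 */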
/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
   for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function.		--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw. */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk))
		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
#endif

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml,tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}
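
/* tcp_minshall_check() above answers "is the most recently sent sub-mss
 * segment still unacknowledged?": snd_sml records the end of the last
 * small transmission, so the Nagle/Minshall test below holds further
 * small segments until that one is ACKed, unless TCP_NODELAY disabled
 * Nagle or the segment hits one of the exceptions in tcp_nagle_test().
 */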

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tp->urg_mode || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk_charge_skb(sk, buff);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks. */
	if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= 65536)
		goto send_now;

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer.  */
	tp->tso_deferred = 1 | (jiffies<<1);

	return 1;

send_now:
	tp->tso_deferred = 0;
	return 0;
}

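/* Probing below is deliberately conservative: a probe is only built when
 * the connection is in TCP_CA_Open with a congestion window of at least
 * 11 packets, and the probe itself is a single segment of 2*mss_cache
 * bytes (roughly 2920 bytes of payload for a common 1460 byte mss),
 * assembled by copying already-queued data.
 */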
/* Create a new MTU probe if we are ready.
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	unsigned int pif;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.eff_sacks)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk, 0);
	probe_size = 2*tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	len = 0;
	if ((skb = tcp_send_head(sk)) == NULL)
		return -1;
	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
		skb = tcp_write_queue_next(sk, skb);
	if (len < probe_size)
		return -1;

	/* Receive window check. */
	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
		if (tp->snd_wnd < probe_size)
			return -1;
		else
			return 0;
	}

	/* Do we need to wait to drain cwnd? */
	pif = tcp_packets_in_flight(tp);
	if (pif + 2 > tp->snd_cwnd) {
		/* With no packets in flight, don't stall. */
		if (pif == 0)
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk_charge_skb(sk, nskb);

	skb = tcp_send_head(sk);
	tcp_insert_write_queue_before(nskb, skb, sk);
	tcp_advance_send_head(sk, skb);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	len = 0;
	while (len < probe_size) {
		next = tcp_write_queue_next(sk, skb);

		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
					skb_put(nskb, copy), copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			tcp_unlink_write_queue(skb, sk);
			sk_stream_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data, skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;
		skb = next;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		update_send_head(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}


/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;

	/* Do MTU probing. */
	if ((result = tcp_mtu_probe(sk)) == 0) {
		return 0;
	} else if (result > 0) {
		sent_pkts = 1;
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk);
		return 0;
	}
	return !tp->packets_out && tcp_send_head(sk);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle)
{
	struct sk_buff *skb = tcp_send_head(sk);

	if (skb) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk);
	}
}

/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to setup probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
			update_send_head(sk, skb);
			tcp_cwnd_validate(sk);
			return;
		}
	}
}

/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RECV.NEXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria. The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *	If the free space is less than the 1/4 of the maximum
 *	space available and the free space is less than 1/2 mss,
 *	then set the window to 0.
 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *	Otherwise, just prevent the window from shrinking
 *	and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue. It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account. */

u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data.  Previous versions used mss_clamp
	 * here.  I don't know if the value based on our guesses
	 * of peer's MSS is better for the performance.  It's more correct
	 * but may be worse for the performance because of rcv_mss
	 * fluctuations.  --SAW  1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	if (mss > full_space)
		mss = full_space;

	if (free_space < full_space/2) {
		icsk->icsk_ack.quick = 0;

		if (tcp_memory_pressure)
			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Important case: prevent zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space/mss)*mss;
		else if (mss == full_space &&
			 free_space > window + full_space/2)
			window = free_space;
	}

	return window;
}

/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);

	/* The first test we must make is that neither of these two
	 * SKB's are still referenced by someone else.
	 */
	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
		int skb_size = skb->len, next_skb_size = next_skb->len;
		u16 flags = TCP_SKB_CB(skb)->flags;

		/* Also punt if next skb has been SACK'd. */
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
			return;

		/* Next skb is out of window. */
		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
			return;

		/* Punt if not enough space exists in the first SKB for
		 * the data in the second, or the total combined payload
		 * would exceed the MSS.
		 */
		if ((next_skb_size > skb_tailroom(skb)) ||
		    ((skb_size + next_skb_size) > mss_now))
			return;

		BUG_ON(tcp_skb_pcount(skb) != 1 ||
		       tcp_skb_pcount(next_skb) != 1);

		if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out &&
		    (TCP_SKB_CB(next_skb)->seq == tp->highest_sack)))
			return;

		/* Ok.  We will be able to collapse the packet. */
		tcp_unlink_write_queue(next_skb, sk);

		skb_copy_from_linear_data(next_skb,
					  skb_put(skb, next_skb_size),
					  next_skb_size);

		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
			skb->ip_summed = CHECKSUM_PARTIAL;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

		/* Update sequence range on original skb. */
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

		/* Merge over control information. */
		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
		TCP_SKB_CB(skb)->flags = flags;

		/* All done, get rid of second SKB and account for it so
		 * packet counting does not break.
		 */
		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
			tp->retrans_out -= tcp_skb_pcount(next_skb);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
			tp->lost_out -= tcp_skb_pcount(next_skb);
		/* Reno case is special. Sigh... */
		if (tcp_is_reno(tp) && tp->sacked_out)
			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);

		tcp_adjust_fackets_out(tp, next_skb, tcp_skb_pcount(next_skb));
		tp->packets_out -= tcp_skb_pcount(next_skb);

		/* changed transmit queue under us so clear hints */
		tcp_clear_retrans_hints_partial(tp);
		/* manually tune sacktag skb hint */
		if (tp->fastpath_skb_hint == next_skb) {
			tp->fastpath_skb_hint = skb;
			tp->fastpath_cnt_hint -= tcp_skb_pcount(skb);
		}

		sk_stream_free_skb(sk, next_skb);
	}
}

/* Do a simple retransmit without using the backoff mechanisms in
 * tcp_timer. This is used for path mtu discovery.
 * The socket is already locked here.
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk, 0);
	int lost = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (skb->len > mss &&
		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				lost = 1;
			}
		}
	}

	tcp_clear_all_retrans_hints(tp);

	if (!lost)
		return;

	tcp_verify_left_out(tp);

	/* Don't muck with the congestion window here.
	 * Reason is that we do not increase amount of _data_
	 * in network, but units changed and effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}

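/* The retransmit path below tries to keep the wire efficient: before
 * resending, tcp_retransmit_skb() may call tcp_retrans_try_collapse()
 * to merge a small (< mss/2), linear, single-count skb with the one
 * that follows it, provided neither is cloned or SACKed and the
 * tcp_retrans_collapse sysctl has not disabled it.
 */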
1827 */ 1828int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 1829{ 1830 struct tcp_sock *tp = tcp_sk(sk); 1831 struct inet_connection_sock *icsk = inet_csk(sk); 1832 unsigned int cur_mss = tcp_current_mss(sk, 0); 1833 int err; 1834 1835 /* Inconslusive MTU probe */ 1836 if (icsk->icsk_mtup.probe_size) { 1837 icsk->icsk_mtup.probe_size = 0; 1838 } 1839 1840 /* Do not sent more than we queued. 1/4 is reserved for possible 1841 * copying overhead: fragmentation, tunneling, mangling etc. 1842 */ 1843 if (atomic_read(&sk->sk_wmem_alloc) > 1844 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 1845 return -EAGAIN; 1846 1847 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 1848 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1849 BUG(); 1850 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 1851 return -ENOMEM; 1852 } 1853 1854 /* If receiver has shrunk his window, and skb is out of 1855 * new window, do not retransmit it. The exception is the 1856 * case, when window is shrunk to zero. In this case 1857 * our retransmit serves as a zero window probe. 1858 */ 1859 if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd) 1860 && TCP_SKB_CB(skb)->seq != tp->snd_una) 1861 return -EAGAIN; 1862 1863 if (skb->len > cur_mss) { 1864 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 1865 return -ENOMEM; /* We'll try again later. */ 1866 } 1867 1868 /* Collapse two adjacent packets if worthwhile and we can. */ 1869 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 1870 (skb->len < (cur_mss >> 1)) && 1871 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && 1872 (!tcp_skb_is_last(sk, skb)) && 1873 (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && 1874 (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && 1875 (sysctl_tcp_retrans_collapse != 0)) 1876 tcp_retrans_try_collapse(sk, skb, cur_mss); 1877 1878 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 1879 return -EHOSTUNREACH; /* Routing failure or similar. */ 1880 1881 /* Some Solaris stacks overoptimize and ignore the FIN on a 1882 * retransmit when old data is attached. So strip it off 1883 * since it is cheap to do so and saves bytes on the network. 1884 */ 1885 if (skb->len > 0 && 1886 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1887 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 1888 if (!pskb_trim(skb, 0)) { 1889 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 1890 skb_shinfo(skb)->gso_segs = 1; 1891 skb_shinfo(skb)->gso_size = 0; 1892 skb_shinfo(skb)->gso_type = 0; 1893 skb->ip_summed = CHECKSUM_NONE; 1894 skb->csum = 0; 1895 } 1896 } 1897 1898 /* Make a copy, if the first transmission SKB clone we made 1899 * is still in somebody's hands, else make a clone. 1900 */ 1901 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1902 1903 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 1904 1905 if (err == 0) { 1906 /* Update global TCP statistics. */ 1907 TCP_INC_STATS(TCP_MIB_RETRANSSEGS); 1908 1909 tp->total_retrans++; 1910 1911#if FASTRETRANS_DEBUG > 0 1912 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 1913 if (net_ratelimit()) 1914 printk(KERN_DEBUG "retrans_out leaked.\n"); 1915 } 1916#endif 1917 if (!tp->retrans_out) 1918 tp->lost_retrans_low = tp->snd_nxt; 1919 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 1920 tp->retrans_out += tcp_skb_pcount(skb); 1921 1922 /* Save stamp of the first retransmit. 
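 *
 * Editor's note (added in editing): only the stamp of the very first
 * retransmission is kept.  Later, if an echoed timestamp shows the peer
 * acknowledged data sent before this stamp, the original transmission
 * must have arrived and the congestion-window reduction can be undone
 * (see tcp_input.c).  Sketch of the guard, in plain C with hypothetical
 * local names:
 *
 *	if (!retrans_stamp)
 *		retrans_stamp = now;	// first retransmit only
 *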
*/ 1923 if (!tp->retrans_stamp) 1924 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 1925 1926 tp->undo_retrans++; 1927 1928 /* snd_nxt is stored to detect loss of retransmitted segment, 1929 * see tcp_input.c tcp_sacktag_write_queue(). 1930 */ 1931 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 1932 } 1933 return err; 1934} 1935 1936/* This gets called after a retransmit timeout, and the initially 1937 * retransmitted data is acknowledged. It tries to continue 1938 * resending the rest of the retransmit queue, until either 1939 * we've sent it all or the congestion window limit is reached. 1940 * If doing SACK, the first ACK which comes back for a timeout 1941 * based retransmit packet might feed us FACK information again. 1942 * If so, we use it to avoid unnecessarily retransmissions. 1943 */ 1944void tcp_xmit_retransmit_queue(struct sock *sk) 1945{ 1946 const struct inet_connection_sock *icsk = inet_csk(sk); 1947 struct tcp_sock *tp = tcp_sk(sk); 1948 struct sk_buff *skb; 1949 int packet_cnt; 1950 1951 if (tp->retransmit_skb_hint) { 1952 skb = tp->retransmit_skb_hint; 1953 packet_cnt = tp->retransmit_cnt_hint; 1954 }else{ 1955 skb = tcp_write_queue_head(sk); 1956 packet_cnt = 0; 1957 } 1958 1959 /* First pass: retransmit lost packets. */ 1960 if (tp->lost_out) { 1961 tcp_for_write_queue_from(skb, sk) { 1962 __u8 sacked = TCP_SKB_CB(skb)->sacked; 1963 1964 if (skb == tcp_send_head(sk)) 1965 break; 1966 /* we could do better than to assign each time */ 1967 tp->retransmit_skb_hint = skb; 1968 tp->retransmit_cnt_hint = packet_cnt; 1969 1970 /* Assume this retransmit will generate 1971 * only one packet for congestion window 1972 * calculation purposes. This works because 1973 * tcp_retransmit_skb() will chop up the 1974 * packet to be MSS sized and all the 1975 * packet counting works out. 1976 */ 1977 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 1978 return; 1979 1980 if (sacked & TCPCB_LOST) { 1981 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 1982 if (tcp_retransmit_skb(sk, skb)) { 1983 tp->retransmit_skb_hint = NULL; 1984 return; 1985 } 1986 if (icsk->icsk_ca_state != TCP_CA_Loss) 1987 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 1988 else 1989 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 1990 1991 if (skb == tcp_write_queue_head(sk)) 1992 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 1993 inet_csk(sk)->icsk_rto, 1994 TCP_RTO_MAX); 1995 } 1996 1997 packet_cnt += tcp_skb_pcount(skb); 1998 if (packet_cnt >= tp->lost_out) 1999 break; 2000 } 2001 } 2002 } 2003 2004 /* OK, demanded retransmission is finished. */ 2005 2006 /* Forward retransmissions are possible only during Recovery. */ 2007 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2008 return; 2009 2010 /* No forward retransmissions in Reno are possible. */ 2011 if (tcp_is_reno(tp)) 2012 return; 2013 2014 /* Yeah, we have to make difficult choice between forward transmission 2015 * and retransmission... Both ways have their merits... 2016 * 2017 * For now we do not retransmit anything, while we have some new 2018 * segments to send. In the other cases, follow rule 3 for 2019 * NextSeg() specified in RFC3517. 
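 *
 * Editor's illustration (added in editing, not part of the original
 * file): the forward pass below walks unSACKed segments up to the
 * highest SACKed sequence, as long as the congestion window has room,
 * skipping anything already tagged (SACKed, marked lost, or already
 * retransmitted).  A minimal plain-C restatement over an array, with
 * hypothetical flag names standing in for TCPCB_*:
 *
 *	#include <stdint.h>
 *
 *	#define F_SACKED	0x1
 *	#define F_RETRANS	0x2
 *	#define F_LOST		0x4	// together: the "tag bits"
 *
 *	static int forward_pass(const uint8_t *tags, const uint32_t *seq,
 *				int nsegs, uint32_t highest_sack,
 *				int in_flight, int cwnd)
 *	{
 *		int sent = 0;
 *
 *		for (int i = 0; i < nsegs; i++) {
 *			if ((int32_t)(seq[i] - highest_sack) > 0)
 *				break;		// nothing useful beyond this
 *			if (in_flight + sent >= cwnd)
 *				break;		// no congestion-window room
 *			if (tags[i] & (F_SACKED | F_RETRANS | F_LOST))
 *				continue;	// already handled
 *			sent++;			// would retransmit this one
 *		}
 *		return sent;
 *	}
 *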
2020 */ 2021 2022 if (tcp_may_send_now(sk)) 2023 return; 2024 2025 /* If nothing is SACKed, highest_sack in the loop won't be valid */ 2026 if (!tp->sacked_out) 2027 return; 2028 2029 if (tp->forward_skb_hint) 2030 skb = tp->forward_skb_hint; 2031 else 2032 skb = tcp_write_queue_head(sk); 2033 2034 tcp_for_write_queue_from(skb, sk) { 2035 if (skb == tcp_send_head(sk)) 2036 break; 2037 tp->forward_skb_hint = skb; 2038 2039 if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack)) 2040 break; 2041 2042 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2043 break; 2044 2045 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 2046 continue; 2047 2048 /* Ok, retransmit it. */ 2049 if (tcp_retransmit_skb(sk, skb)) { 2050 tp->forward_skb_hint = NULL; 2051 break; 2052 } 2053 2054 if (skb == tcp_write_queue_head(sk)) 2055 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2056 inet_csk(sk)->icsk_rto, 2057 TCP_RTO_MAX); 2058 2059 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 2060 } 2061} 2062 2063 2064/* Send a fin. The caller locks the socket for us. This cannot be 2065 * allowed to fail queueing a FIN frame under any circumstances. 2066 */ 2067void tcp_send_fin(struct sock *sk) 2068{ 2069 struct tcp_sock *tp = tcp_sk(sk); 2070 struct sk_buff *skb = tcp_write_queue_tail(sk); 2071 int mss_now; 2072 2073 /* Optimization, tack on the FIN if we have a queue of 2074 * unsent frames. But be careful about outgoing SACKS 2075 * and IP options. 2076 */ 2077 mss_now = tcp_current_mss(sk, 1); 2078 2079 if (tcp_send_head(sk) != NULL) { 2080 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 2081 TCP_SKB_CB(skb)->end_seq++; 2082 tp->write_seq++; 2083 } else { 2084 /* Socket is locked, keep trying until memory is available. */ 2085 for (;;) { 2086 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 2087 if (skb) 2088 break; 2089 yield(); 2090 } 2091 2092 /* Reserve space for headers and prepare control bits. */ 2093 skb_reserve(skb, MAX_TCP_HEADER); 2094 skb->csum = 0; 2095 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 2096 TCP_SKB_CB(skb)->sacked = 0; 2097 skb_shinfo(skb)->gso_segs = 1; 2098 skb_shinfo(skb)->gso_size = 0; 2099 skb_shinfo(skb)->gso_type = 0; 2100 2101 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2102 TCP_SKB_CB(skb)->seq = tp->write_seq; 2103 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 2104 tcp_queue_skb(sk, skb); 2105 } 2106 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2107} 2108 2109/* We get here when a process closes a file descriptor (either due to 2110 * an explicit close() or as a byproduct of exit()'ing) and there 2111 * was unread data in the receive queue. This behavior is recommended 2112 * by RFC 2525, section 2.17. -DaveM 2113 */ 2114void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2115{ 2116 struct sk_buff *skb; 2117 2118 /* NOTE: No TCP options attached and we never retransmit this. */ 2119 skb = alloc_skb(MAX_TCP_HEADER, priority); 2120 if (!skb) { 2121 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2122 return; 2123 } 2124 2125 /* Reserve space for headers and prepare control bits. */ 2126 skb_reserve(skb, MAX_TCP_HEADER); 2127 skb->csum = 0; 2128 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 2129 TCP_SKB_CB(skb)->sacked = 0; 2130 skb_shinfo(skb)->gso_segs = 1; 2131 skb_shinfo(skb)->gso_size = 0; 2132 skb_shinfo(skb)->gso_type = 0; 2133 2134 /* Send it off. 
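 *
 * Editor's note (added in editing): unlike SYN and FIN, a bare RST or
 * ACK carries no payload and occupies no sequence space, so end_seq is
 * set equal to seq just below.  In plain C terms:
 *
 *	end_seq = seq;		// RST/ACK: zero-length, no sequence space
 *	// compare tcp_send_fin() above: end_seq = seq + 1 (FIN eats one)
 *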
*/ 2135 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk); 2136 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 2137 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2138 if (tcp_transmit_skb(sk, skb, 0, priority)) 2139 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2140} 2141 2142/* WARNING: This routine must only be called when we have already sent 2143 * a SYN packet that crossed the incoming SYN that caused this routine 2144 * to get called. If this assumption fails then the initial rcv_wnd 2145 * and rcv_wscale values will not be correct. 2146 */ 2147int tcp_send_synack(struct sock *sk) 2148{ 2149 struct sk_buff* skb; 2150 2151 skb = tcp_write_queue_head(sk); 2152 if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { 2153 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2154 return -EFAULT; 2155 } 2156 if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { 2157 if (skb_cloned(skb)) { 2158 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2159 if (nskb == NULL) 2160 return -ENOMEM; 2161 tcp_unlink_write_queue(skb, sk); 2162 skb_header_release(nskb); 2163 __tcp_add_write_queue_head(sk, nskb); 2164 sk_stream_free_skb(sk, skb); 2165 sk_charge_skb(sk, nskb); 2166 skb = nskb; 2167 } 2168 2169 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 2170 TCP_ECN_send_synack(tcp_sk(sk), skb); 2171 } 2172 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2173 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2174} 2175 2176/* 2177 * Prepare a SYN-ACK. 2178 */ 2179struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2180 struct request_sock *req) 2181{ 2182 struct inet_request_sock *ireq = inet_rsk(req); 2183 struct tcp_sock *tp = tcp_sk(sk); 2184 struct tcphdr *th; 2185 int tcp_header_size; 2186 struct sk_buff *skb; 2187#ifdef CONFIG_TCP_MD5SIG 2188 struct tcp_md5sig_key *md5; 2189 __u8 *md5_hash_location; 2190#endif 2191 2192 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2193 if (skb == NULL) 2194 return NULL; 2195 2196 /* Reserve space for headers. */ 2197 skb_reserve(skb, MAX_TCP_HEADER); 2198 2199 skb->dst = dst_clone(dst); 2200 2201 tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + 2202 (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + 2203 (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + 2204 /* SACK_PERM is in the place of NOP NOP of TS */ 2205 ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); 2206 2207#ifdef CONFIG_TCP_MD5SIG 2208 /* Are we doing MD5 on this segment? If so - make room for it */ 2209 md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 2210 if (md5) 2211 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; 2212#endif 2213 skb_push(skb, tcp_header_size); 2214 skb_reset_transport_header(skb); 2215 2216 th = tcp_hdr(skb); 2217 memset(th, 0, sizeof(struct tcphdr)); 2218 th->syn = 1; 2219 th->ack = 1; 2220 TCP_ECN_make_synack(req, th); 2221 th->source = inet_sk(sk)->sport; 2222 th->dest = ireq->rmt_port; 2223 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 2224 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 2225 TCP_SKB_CB(skb)->sacked = 0; 2226 skb_shinfo(skb)->gso_segs = 1; 2227 skb_shinfo(skb)->gso_size = 0; 2228 skb_shinfo(skb)->gso_type = 0; 2229 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2230 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2231 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2232 __u8 rcv_wscale; 2233 /* Set this up on the first call only */ 2234 req->window_clamp = tp->window_clamp ? 
: dst_metric(dst, RTAX_WINDOW); 2235 /* tcp_full_space because it is guaranteed to be the first packet */ 2236 tcp_select_initial_window(tcp_full_space(sk), 2237 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2238 &req->rcv_wnd, 2239 &req->window_clamp, 2240 ireq->wscale_ok, 2241 &rcv_wscale); 2242 ireq->rcv_wscale = rcv_wscale; 2243 } 2244 2245 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2246 th->window = htons(min(req->rcv_wnd, 65535U)); 2247 2248 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2249 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 2250 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 2251 TCP_SKB_CB(skb)->when, 2252 req->ts_recent, 2253 ( 2254#ifdef CONFIG_TCP_MD5SIG 2255 md5 ? &md5_hash_location : 2256#endif 2257 NULL) 2258 ); 2259 2260 skb->csum = 0; 2261 th->doff = (tcp_header_size >> 2); 2262 TCP_INC_STATS(TCP_MIB_OUTSEGS); 2263 2264#ifdef CONFIG_TCP_MD5SIG 2265 /* Okay, we have all we need - do the md5 hash if needed */ 2266 if (md5) { 2267 tp->af_specific->calc_md5_hash(md5_hash_location, 2268 md5, 2269 NULL, dst, req, 2270 tcp_hdr(skb), sk->sk_protocol, 2271 skb->len); 2272 } 2273#endif 2274 2275 return skb; 2276} 2277 2278/* 2279 * Do all connect socket setups that can be done AF independent. 2280 */ 2281static void tcp_connect_init(struct sock *sk) 2282{ 2283 struct dst_entry *dst = __sk_dst_get(sk); 2284 struct tcp_sock *tp = tcp_sk(sk); 2285 __u8 rcv_wscale; 2286 2287 /* We'll fix this up when we get a response from the other end. 2288 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2289 */ 2290 tp->tcp_header_len = sizeof(struct tcphdr) + 2291 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 2292 2293#ifdef CONFIG_TCP_MD5SIG 2294 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2295 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2296#endif 2297 2298 /* If user gave his TCP_MAXSEG, record it to clamp */ 2299 if (tp->rx_opt.user_mss) 2300 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2301 tp->max_window = 0; 2302 tcp_mtup_init(sk); 2303 tcp_sync_mss(sk, dst_mtu(dst)); 2304 2305 if (!tp->window_clamp) 2306 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2307 tp->advmss = dst_metric(dst, RTAX_ADVMSS); 2308 tcp_initialize_rcv_mss(sk); 2309 2310 tcp_select_initial_window(tcp_full_space(sk), 2311 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2312 &tp->rcv_wnd, 2313 &tp->window_clamp, 2314 sysctl_tcp_window_scaling, 2315 &rcv_wscale); 2316 2317 tp->rx_opt.rcv_wscale = rcv_wscale; 2318 tp->rcv_ssthresh = tp->rcv_wnd; 2319 2320 sk->sk_err = 0; 2321 sock_reset_flag(sk, SOCK_DONE); 2322 tp->snd_wnd = 0; 2323 tcp_init_wl(tp, tp->write_seq, 0); 2324 tp->snd_una = tp->write_seq; 2325 tp->snd_sml = tp->write_seq; 2326 tp->rcv_nxt = 0; 2327 tp->rcv_wup = 0; 2328 tp->copied_seq = 0; 2329 2330 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2331 inet_csk(sk)->icsk_retransmits = 0; 2332 tcp_clear_retrans(tp); 2333} 2334 2335/* 2336 * Build a SYN and send it off. 2337 */ 2338int tcp_connect(struct sock *sk) 2339{ 2340 struct tcp_sock *tp = tcp_sk(sk); 2341 struct sk_buff *buff; 2342 2343 tcp_connect_init(sk); 2344 2345 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2346 if (unlikely(buff == NULL)) 2347 return -ENOBUFS; 2348 2349 /* Reserve space for headers. 
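 *
 * Editor's illustration (added in editing, not part of the original
 * file): MAX_TCP_HEADER is a worst-case reservation; the actual SYN /
 * SYN-ACK header length is the 20-byte base header plus the negotiated
 * options, as computed in tcp_make_synack() and tcp_connect_init()
 * above.  A plain-C restatement of that arithmetic, using the usual
 * aligned option sizes (MSS 4, timestamps 12, window scale 4,
 * SACK-permitted 4, with SACK-permitted folded into the timestamp
 * padding when both are present):
 *
 *	static int syn_header_len(int tstamp_ok, int wscale_ok, int sack_ok)
 *	{
 *		int len = 20 + 4;		// tcphdr + MSS option
 *
 *		if (tstamp_ok)
 *			len += 12;		// NOP NOP TSopt
 *		if (wscale_ok)
 *			len += 4;		// NOP WSopt
 *		if (sack_ok && !tstamp_ok)
 *			len += 4;		// NOP NOP SACK-permitted
 *		return len;			// all options on: 40 bytes
 *	}
 *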
*/ 2350 skb_reserve(buff, MAX_TCP_HEADER); 2351 2352 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; 2353 TCP_ECN_send_syn(sk, buff); 2354 TCP_SKB_CB(buff)->sacked = 0; 2355 skb_shinfo(buff)->gso_segs = 1; 2356 skb_shinfo(buff)->gso_size = 0; 2357 skb_shinfo(buff)->gso_type = 0; 2358 buff->csum = 0; 2359 tp->snd_nxt = tp->write_seq; 2360 TCP_SKB_CB(buff)->seq = tp->write_seq++; 2361 TCP_SKB_CB(buff)->end_seq = tp->write_seq; 2362 2363 /* Send it off. */ 2364 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2365 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2366 skb_header_release(buff); 2367 __tcp_add_write_queue_tail(sk, buff); 2368 sk_charge_skb(sk, buff); 2369 tp->packets_out += tcp_skb_pcount(buff); 2370 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2371 2372 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2373 * in order to make this packet get counted in tcpOutSegs. 2374 */ 2375 tp->snd_nxt = tp->write_seq; 2376 tp->pushed_seq = tp->write_seq; 2377 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 2378 2379 /* Timer for repeating the SYN until an answer. */ 2380 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2381 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2382 return 0; 2383} 2384 2385/* Send out a delayed ack, the caller does the policy checking 2386 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2387 * for details. 2388 */ 2389void tcp_send_delayed_ack(struct sock *sk) 2390{ 2391 struct inet_connection_sock *icsk = inet_csk(sk); 2392 int ato = icsk->icsk_ack.ato; 2393 unsigned long timeout; 2394 2395 if (ato > TCP_DELACK_MIN) { 2396 const struct tcp_sock *tp = tcp_sk(sk); 2397 int max_ato = HZ/2; 2398 2399 if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2400 max_ato = TCP_DELACK_MAX; 2401 2402 /* Slow path, intersegment interval is "high". */ 2403 2404 /* If some rtt estimate is known, use it to bound delayed ack. 2405 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2406 * directly. 2407 */ 2408 if (tp->srtt) { 2409 int rtt = max(tp->srtt>>3, TCP_DELACK_MIN); 2410 2411 if (rtt < max_ato) 2412 max_ato = rtt; 2413 } 2414 2415 ato = min(ato, max_ato); 2416 } 2417 2418 /* Stay within the limit we were given */ 2419 timeout = jiffies + ato; 2420 2421 /* Use new timeout only if there wasn't a older one earlier. */ 2422 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 2423 /* If delack timer was blocked or is about to expire, 2424 * send ACK now. 2425 */ 2426 if (icsk->icsk_ack.blocked || 2427 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 2428 tcp_send_ack(sk); 2429 return; 2430 } 2431 2432 if (!time_before(timeout, icsk->icsk_ack.timeout)) 2433 timeout = icsk->icsk_ack.timeout; 2434 } 2435 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2436 icsk->icsk_ack.timeout = timeout; 2437 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 2438} 2439 2440/* This routine sends an ack and also updates the window. */ 2441void tcp_send_ack(struct sock *sk) 2442{ 2443 /* If we have been reset, we may not send again. */ 2444 if (sk->sk_state != TCP_CLOSE) { 2445 struct sk_buff *buff; 2446 2447 /* We are not putting this on the write queue, so 2448 * tcp_transmit_skb() will set the ownership to this 2449 * sock. 
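 *
 * Editor's note (added in editing): if the allocation just below fails,
 * the code falls back to the delayed-ACK timer instead.  For reference,
 * the interval used by tcp_send_delayed_ack() above is bounded roughly
 * as follows (plain-C sketch with illustrative constants assuming
 * HZ = 1000; these are not the kernel's definitions):
 *
 *	static int bound_ato(int ato, int srtt, int pingpong)
 *	{
 *		const int hz = 1000, delack_min = hz / 25, delack_max = hz / 5;
 *		int max_ato = pingpong ? delack_max : hz / 2;
 *
 *		if (srtt) {			// srtt is stored << 3
 *			int rtt = srtt >> 3;
 *			if (rtt < delack_min)
 *				rtt = delack_min;
 *			if (rtt < max_ato)
 *				max_ato = rtt;	// never wait past one RTT
 *		}
 *		return ato < max_ato ? ato : max_ato;
 *	}
 *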
2450 */ 2451 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2452 if (buff == NULL) { 2453 inet_csk_schedule_ack(sk); 2454 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 2455 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 2456 TCP_DELACK_MAX, TCP_RTO_MAX); 2457 return; 2458 } 2459 2460 /* Reserve space for headers and prepare control bits. */ 2461 skb_reserve(buff, MAX_TCP_HEADER); 2462 buff->csum = 0; 2463 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; 2464 TCP_SKB_CB(buff)->sacked = 0; 2465 skb_shinfo(buff)->gso_segs = 1; 2466 skb_shinfo(buff)->gso_size = 0; 2467 skb_shinfo(buff)->gso_type = 0; 2468 2469 /* Send it off, this clears delayed acks for us. */ 2470 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk); 2471 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2472 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 2473 } 2474} 2475 2476/* This routine sends a packet with an out of date sequence 2477 * number. It assumes the other end will try to ack it. 2478 * 2479 * Question: what should we make while urgent mode? 2480 * 4.4BSD forces sending single byte of data. We cannot send 2481 * out of window data, because we have SND.NXT==SND.MAX... 2482 * 2483 * Current solution: to send TWO zero-length segments in urgent mode: 2484 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 2485 * out-of-date with SND.UNA-1 to probe window. 2486 */ 2487static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 2488{ 2489 struct tcp_sock *tp = tcp_sk(sk); 2490 struct sk_buff *skb; 2491 2492 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2493 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2494 if (skb == NULL) 2495 return -1; 2496 2497 /* Reserve space for headers and set control bits. */ 2498 skb_reserve(skb, MAX_TCP_HEADER); 2499 skb->csum = 0; 2500 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 2501 TCP_SKB_CB(skb)->sacked = urgent; 2502 skb_shinfo(skb)->gso_segs = 1; 2503 skb_shinfo(skb)->gso_size = 0; 2504 skb_shinfo(skb)->gso_type = 0; 2505 2506 /* Use a previous sequence. This should cause the other 2507 * end to send an ack. Don't queue or clone SKB, just 2508 * send it. 2509 */ 2510 TCP_SKB_CB(skb)->seq = urgent ? 
tp->snd_una : tp->snd_una - 1; 2511 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 2512 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2513 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2514} 2515 2516int tcp_write_wakeup(struct sock *sk) 2517{ 2518 if (sk->sk_state != TCP_CLOSE) { 2519 struct tcp_sock *tp = tcp_sk(sk); 2520 struct sk_buff *skb; 2521 2522 if ((skb = tcp_send_head(sk)) != NULL && 2523 before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { 2524 int err; 2525 unsigned int mss = tcp_current_mss(sk, 0); 2526 unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq; 2527 2528 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2529 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2530 2531 /* We are probing the opening of a window 2532 * but the window size is != 0 2533 * must have been a result SWS avoidance ( sender ) 2534 */ 2535 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2536 skb->len > mss) { 2537 seg_size = min(seg_size, mss); 2538 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2539 if (tcp_fragment(sk, skb, seg_size, mss)) 2540 return -1; 2541 } else if (!tcp_skb_pcount(skb)) 2542 tcp_set_skb_tso_segs(sk, skb, mss); 2543 2544 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2545 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2546 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2547 if (!err) { 2548 update_send_head(sk, skb); 2549 } 2550 return err; 2551 } else { 2552 if (tp->urg_mode && 2553 between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF)) 2554 tcp_xmit_probe_skb(sk, TCPCB_URG); 2555 return tcp_xmit_probe_skb(sk, 0); 2556 } 2557 } 2558 return -1; 2559} 2560 2561/* A window probe timeout has occurred. If window is not closed send 2562 * a partial packet else a zero probe. 2563 */ 2564void tcp_send_probe0(struct sock *sk) 2565{ 2566 struct inet_connection_sock *icsk = inet_csk(sk); 2567 struct tcp_sock *tp = tcp_sk(sk); 2568 int err; 2569 2570 err = tcp_write_wakeup(sk); 2571 2572 if (tp->packets_out || !tcp_send_head(sk)) { 2573 /* Cancel probe timer, if it is not required. */ 2574 icsk->icsk_probes_out = 0; 2575 icsk->icsk_backoff = 0; 2576 return; 2577 } 2578 2579 if (err <= 0) { 2580 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2581 icsk->icsk_backoff++; 2582 icsk->icsk_probes_out++; 2583 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2584 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2585 TCP_RTO_MAX); 2586 } else { 2587 /* If packet was not sent due to local congestion, 2588 * do not backoff and do not remember icsk_probes_out. 2589 * Let local senders to fight for local resources. 2590 * 2591 * Use accumulated backoff yet. 2592 */ 2593 if (!icsk->icsk_probes_out) 2594 icsk->icsk_probes_out = 1; 2595 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2596 min(icsk->icsk_rto << icsk->icsk_backoff, 2597 TCP_RESOURCE_PROBE_INTERVAL), 2598 TCP_RTO_MAX); 2599 } 2600} 2601 2602EXPORT_SYMBOL(tcp_connect); 2603EXPORT_SYMBOL(tcp_make_synack); 2604EXPORT_SYMBOL(tcp_simple_retransmit); 2605EXPORT_SYMBOL(tcp_sync_mss); 2606EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor); 2607EXPORT_SYMBOL(tcp_mtup_init); 2608
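
/*
 * Editor's illustration (added in editing, not part of tcp_output.c):
 * the two window-probe ideas above, restated as a self-contained,
 * userspace-style sketch.  tcp_xmit_probe_skb() sends a zero-length
 * segment with an out-of-date sequence number so the peer is forced to
 * ACK with its current window, and tcp_send_probe0() retries with an
 * exponentially backed-off timer.  The cap values below are illustrative
 * jiffies numbers (HZ = 1000), not the kernel constants.
 */
#include <stdint.h>

/* Sequence number used by a zero-window probe: SND.UNA itself when the
 * urgent pointer must be delivered, otherwise the out-of-date SND.UNA-1.
 * The subtraction is modulo 2^32, so snd_una == 0 yields 0xFFFFFFFF. */
static uint32_t probe_seq(uint32_t snd_una, int urgent)
{
	return urgent ? snd_una : snd_una - 1;
}

/* Next probe0 timeout: doubles per backoff step up to a cap.  When the
 * previous probe failed only because of local congestion, the backoff is
 * left untouched and a much shorter resource-probe cap applies. */
static unsigned long next_probe0_timeout(unsigned long rto, int backoff,
					 int local_congestion)
{
	const unsigned long rto_max = 120ul * 1000;	/* ~120 s */
	const unsigned long resource_probe = 500;	/* ~HZ/2 */
	unsigned long cap = local_congestion ? resource_probe : rto_max;
	unsigned long t = rto << backoff;

	return t < cap ? t : cap;
}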