tcp_output.c revision 0c54b85f2828128274f319a1eb3ce7f604fe2a53
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 */ 20 21/* 22 * Changes: Pedro Roque : Retransmit queue handled by TCP. 23 * : Fragmentation on mtu decrease 24 * : Segment collapse on retransmit 25 * : AF independence 26 * 27 * Linus Torvalds : send_delayed_ack 28 * David S. Miller : Charge memory using the right skb 29 * during syn/ack processing. 30 * David S. Miller : Output engine completely rewritten. 31 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. 32 * Cacophonix Gaul : draft-minshall-nagle-01 33 * J Hadi Salim : ECN support 34 * 35 */ 36 37#include <net/tcp.h> 38 39#include <linux/compiler.h> 40#include <linux/module.h> 41 42/* People can turn this off for buggy TCP's found in printers etc. */ 43int sysctl_tcp_retrans_collapse __read_mostly = 1; 44 45/* People can turn this on to work with those rare, broken TCPs that 46 * interpret the window field as a signed quantity. 47 */ 48int sysctl_tcp_workaround_signed_windows __read_mostly = 0; 49 50/* This limits the percentage of the congestion window which we 51 * will allow a single TSO frame to consume. Building TSO frames 52 * which are too large can cause TCP streams to be bursty. 53 */ 54int sysctl_tcp_tso_win_divisor __read_mostly = 3; 55 56int sysctl_tcp_mtu_probing __read_mostly = 0; 57int sysctl_tcp_base_mss __read_mostly = 512; 58 59/* By default, RFC2861 behavior. */ 60int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 61 62static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 63{ 64 struct tcp_sock *tp = tcp_sk(sk); 65 unsigned int prior_packets = tp->packets_out; 66 67 tcp_advance_send_head(sk, skb); 68 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 69 70 /* Don't override Nagle indefinately with F-RTO */ 71 if (tp->frto_counter == 2) 72 tp->frto_counter = 3; 73 74 tp->packets_out += tcp_skb_pcount(skb); 75 if (!prior_packets) 76 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 77 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 78} 79 80/* SND.NXT, if window was not shrunk. 81 * If window has been shrunk, what should we make? It is not clear at all. 82 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( 83 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 84 * invalid. OK, let's make this for now: 85 */ 86static inline __u32 tcp_acceptable_seq(struct sock *sk) 87{ 88 struct tcp_sock *tp = tcp_sk(sk); 89 90 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 91 return tp->snd_nxt; 92 else 93 return tcp_wnd_end(tp); 94} 95 96/* Calculate mss to advertise in SYN segment. 97 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: 98 * 99 * 1. It is independent of path mtu. 100 * 2. Ideally, it is maximal possible segment size i.e. 65535-40. 101 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of 102 * attached devices, because some buggy hosts are confused by 103 * large MSS. 104 * 4. We do not make 3, we advertise MSS, calculated from first 105 * hop device mtu, but allow to raise it to ip_rt_min_advmss. 106 * This may be overridden via information stored in routing table. 107 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, 108 * probably even Jumbo". 109 */ 110static __u16 tcp_advertise_mss(struct sock *sk) 111{ 112 struct tcp_sock *tp = tcp_sk(sk); 113 struct dst_entry *dst = __sk_dst_get(sk); 114 int mss = tp->advmss; 115 116 if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) { 117 mss = dst_metric(dst, RTAX_ADVMSS); 118 tp->advmss = mss; 119 } 120 121 return (__u16)mss; 122} 123 124/* RFC2861. Reset CWND after idle period longer RTO to "restart window". 125 * This is the first part of cwnd validation mechanism. */ 126static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) 127{ 128 struct tcp_sock *tp = tcp_sk(sk); 129 s32 delta = tcp_time_stamp - tp->lsndtime; 130 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 131 u32 cwnd = tp->snd_cwnd; 132 133 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 134 135 tp->snd_ssthresh = tcp_current_ssthresh(sk); 136 restart_cwnd = min(restart_cwnd, cwnd); 137 138 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 139 cwnd >>= 1; 140 tp->snd_cwnd = max(cwnd, restart_cwnd); 141 tp->snd_cwnd_stamp = tcp_time_stamp; 142 tp->snd_cwnd_used = 0; 143} 144 145static void tcp_event_data_sent(struct tcp_sock *tp, 146 struct sk_buff *skb, struct sock *sk) 147{ 148 struct inet_connection_sock *icsk = inet_csk(sk); 149 const u32 now = tcp_time_stamp; 150 151 if (sysctl_tcp_slow_start_after_idle && 152 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) 153 tcp_cwnd_restart(sk, __sk_dst_get(sk)); 154 155 tp->lsndtime = now; 156 157 /* If it is a reply for ato after last received 158 * packet, enter pingpong mode. 159 */ 160 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 161 icsk->icsk_ack.pingpong = 1; 162} 163 164static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 165{ 166 tcp_dec_quickack_mode(sk, pkts); 167 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 168} 169 170/* Determine a window scaling and initial window to offer. 171 * Based on the assumption that the given amount of space 172 * will be offered. Store the results in the tp structure. 173 * NOTE: for smooth operation initial space offering should 174 * be a multiple of mss if possible. We assume here that mss >= 1. 175 * This MUST be enforced by all callers. 176 */ 177void tcp_select_initial_window(int __space, __u32 mss, 178 __u32 *rcv_wnd, __u32 *window_clamp, 179 int wscale_ok, __u8 *rcv_wscale) 180{ 181 unsigned int space = (__space < 0 ? 0 : __space); 182 183 /* If no clamp set the clamp to the max possible scaled window */ 184 if (*window_clamp == 0) 185 (*window_clamp) = (65535 << 14); 186 space = min(*window_clamp, space); 187 188 /* Quantize space offering to a multiple of mss if possible. */ 189 if (space > mss) 190 space = (space / mss) * mss; 191 192 /* NOTE: offering an initial window larger than 32767 193 * will break some buggy TCP stacks. If the admin tells us 194 * it is likely we could be speaking with such a buggy stack 195 * we will truncate our initial window offering to 32K-1 196 * unless the remote has sent us a window scaling option, 197 * which we interpret as a sign the remote TCP is not 198 * misinterpreting the window field as a signed quantity. 199 */ 200 if (sysctl_tcp_workaround_signed_windows) 201 (*rcv_wnd) = min(space, MAX_TCP_WINDOW); 202 else 203 (*rcv_wnd) = space; 204 205 (*rcv_wscale) = 0; 206 if (wscale_ok) { 207 /* Set window scaling on max possible window 208 * See RFC1323 for an explanation of the limit to 14 209 */ 210 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 211 space = min_t(u32, space, *window_clamp); 212 while (space > 65535 && (*rcv_wscale) < 14) { 213 space >>= 1; 214 (*rcv_wscale)++; 215 } 216 } 217 218 /* Set initial window to value enough for senders, 219 * following RFC2414. Senders, not following this RFC, 220 * will be satisfied with 2. 221 */ 222 if (mss > (1 << *rcv_wscale)) { 223 int init_cwnd = 4; 224 if (mss > 1460 * 3) 225 init_cwnd = 2; 226 else if (mss > 1460) 227 init_cwnd = 3; 228 if (*rcv_wnd > init_cwnd * mss) 229 *rcv_wnd = init_cwnd * mss; 230 } 231 232 /* Set the clamp no higher than max representable value */ 233 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); 234} 235 236/* Chose a new window to advertise, update state in tcp_sock for the 237 * socket, and return result with RFC1323 scaling applied. The return 238 * value can be stuffed directly into th->window for an outgoing 239 * frame. 240 */ 241static u16 tcp_select_window(struct sock *sk) 242{ 243 struct tcp_sock *tp = tcp_sk(sk); 244 u32 cur_win = tcp_receive_window(tp); 245 u32 new_win = __tcp_select_window(sk); 246 247 /* Never shrink the offered window */ 248 if (new_win < cur_win) { 249 /* Danger Will Robinson! 250 * Don't update rcv_wup/rcv_wnd here or else 251 * we will not be able to advertise a zero 252 * window in time. --DaveM 253 * 254 * Relax Will Robinson. 255 */ 256 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); 257 } 258 tp->rcv_wnd = new_win; 259 tp->rcv_wup = tp->rcv_nxt; 260 261 /* Make sure we do not exceed the maximum possible 262 * scaled window. 263 */ 264 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) 265 new_win = min(new_win, MAX_TCP_WINDOW); 266 else 267 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); 268 269 /* RFC1323 scaling applied */ 270 new_win >>= tp->rx_opt.rcv_wscale; 271 272 /* If we advertise zero window, disable fast path. */ 273 if (new_win == 0) 274 tp->pred_flags = 0; 275 276 return new_win; 277} 278 279static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) 280{ 281 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; 282 if (!(tp->ecn_flags & TCP_ECN_OK)) 283 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; 284} 285 286static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) 287{ 288 struct tcp_sock *tp = tcp_sk(sk); 289 290 tp->ecn_flags = 0; 291 if (sysctl_tcp_ecn) { 292 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; 293 tp->ecn_flags = TCP_ECN_OK; 294 } 295} 296 297static __inline__ void 298TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) 299{ 300 if (inet_rsk(req)->ecn_ok) 301 th->ece = 1; 302} 303 304static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, 305 int tcp_header_len) 306{ 307 struct tcp_sock *tp = tcp_sk(sk); 308 309 if (tp->ecn_flags & TCP_ECN_OK) { 310 /* Not-retransmitted data segment: set ECT and inject CWR. */ 311 if (skb->len != tcp_header_len && 312 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { 313 INET_ECN_xmit(sk); 314 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { 315 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 316 tcp_hdr(skb)->cwr = 1; 317 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 318 } 319 } else { 320 /* ACK or retransmitted segment: clear ECT|CE */ 321 INET_ECN_dontxmit(sk); 322 } 323 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) 324 tcp_hdr(skb)->ece = 1; 325 } 326} 327 328/* Constructs common control bits of non-data skb. If SYN/FIN is present, 329 * auto increment end seqno. 330 */ 331static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 332{ 333 skb->csum = 0; 334 335 TCP_SKB_CB(skb)->flags = flags; 336 TCP_SKB_CB(skb)->sacked = 0; 337 338 skb_shinfo(skb)->gso_segs = 1; 339 skb_shinfo(skb)->gso_size = 0; 340 skb_shinfo(skb)->gso_type = 0; 341 342 TCP_SKB_CB(skb)->seq = seq; 343 if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN)) 344 seq++; 345 TCP_SKB_CB(skb)->end_seq = seq; 346} 347 348static inline int tcp_urg_mode(const struct tcp_sock *tp) 349{ 350 return tp->snd_una != tp->snd_up; 351} 352 353#define OPTION_SACK_ADVERTISE (1 << 0) 354#define OPTION_TS (1 << 1) 355#define OPTION_MD5 (1 << 2) 356 357struct tcp_out_options { 358 u8 options; /* bit field of OPTION_* */ 359 u8 ws; /* window scale, 0 to disable */ 360 u8 num_sack_blocks; /* number of SACK blocks to include */ 361 u16 mss; /* 0 to disable */ 362 __u32 tsval, tsecr; /* need to include OPTION_TS */ 363}; 364 365/* Beware: Something in the Internet is very sensitive to the ordering of 366 * TCP options, we learned this through the hard way, so be careful here. 367 * Luckily we can at least blame others for their non-compliance but from 368 * inter-operatibility perspective it seems that we're somewhat stuck with 369 * the ordering which we have been using if we want to keep working with 370 * those broken things (not that it currently hurts anybody as there isn't 371 * particular reason why the ordering would need to be changed). 372 * 373 * At least SACK_PERM as the first option is known to lead to a disaster 374 * (but it may well be that other scenarios fail similarly). 375 */ 376static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 377 const struct tcp_out_options *opts, 378 __u8 **md5_hash) { 379 if (unlikely(OPTION_MD5 & opts->options)) { 380 *ptr++ = htonl((TCPOPT_NOP << 24) | 381 (TCPOPT_NOP << 16) | 382 (TCPOPT_MD5SIG << 8) | 383 TCPOLEN_MD5SIG); 384 *md5_hash = (__u8 *)ptr; 385 ptr += 4; 386 } else { 387 *md5_hash = NULL; 388 } 389 390 if (unlikely(opts->mss)) { 391 *ptr++ = htonl((TCPOPT_MSS << 24) | 392 (TCPOLEN_MSS << 16) | 393 opts->mss); 394 } 395 396 if (likely(OPTION_TS & opts->options)) { 397 if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { 398 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 399 (TCPOLEN_SACK_PERM << 16) | 400 (TCPOPT_TIMESTAMP << 8) | 401 TCPOLEN_TIMESTAMP); 402 } else { 403 *ptr++ = htonl((TCPOPT_NOP << 24) | 404 (TCPOPT_NOP << 16) | 405 (TCPOPT_TIMESTAMP << 8) | 406 TCPOLEN_TIMESTAMP); 407 } 408 *ptr++ = htonl(opts->tsval); 409 *ptr++ = htonl(opts->tsecr); 410 } 411 412 if (unlikely(OPTION_SACK_ADVERTISE & opts->options && 413 !(OPTION_TS & opts->options))) { 414 *ptr++ = htonl((TCPOPT_NOP << 24) | 415 (TCPOPT_NOP << 16) | 416 (TCPOPT_SACK_PERM << 8) | 417 TCPOLEN_SACK_PERM); 418 } 419 420 if (unlikely(opts->ws)) { 421 *ptr++ = htonl((TCPOPT_NOP << 24) | 422 (TCPOPT_WINDOW << 16) | 423 (TCPOLEN_WINDOW << 8) | 424 opts->ws); 425 } 426 427 if (unlikely(opts->num_sack_blocks)) { 428 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 429 tp->duplicate_sack : tp->selective_acks; 430 int this_sack; 431 432 *ptr++ = htonl((TCPOPT_NOP << 24) | 433 (TCPOPT_NOP << 16) | 434 (TCPOPT_SACK << 8) | 435 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 436 TCPOLEN_SACK_PERBLOCK))); 437 438 for (this_sack = 0; this_sack < opts->num_sack_blocks; 439 ++this_sack) { 440 *ptr++ = htonl(sp[this_sack].start_seq); 441 *ptr++ = htonl(sp[this_sack].end_seq); 442 } 443 444 tp->rx_opt.dsack = 0; 445 } 446} 447 448static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 449 struct tcp_out_options *opts, 450 struct tcp_md5sig_key **md5) { 451 struct tcp_sock *tp = tcp_sk(sk); 452 unsigned size = 0; 453 454#ifdef CONFIG_TCP_MD5SIG 455 *md5 = tp->af_specific->md5_lookup(sk, sk); 456 if (*md5) { 457 opts->options |= OPTION_MD5; 458 size += TCPOLEN_MD5SIG_ALIGNED; 459 } 460#else 461 *md5 = NULL; 462#endif 463 464 /* We always get an MSS option. The option bytes which will be seen in 465 * normal data packets should timestamps be used, must be in the MSS 466 * advertised. But we subtract them from tp->mss_cache so that 467 * calculations in tcp_sendmsg are simpler etc. So account for this 468 * fact here if necessary. If we don't do this correctly, as a 469 * receiver we won't recognize data packets as being full sized when we 470 * should, and thus we won't abide by the delayed ACK rules correctly. 471 * SACKs don't matter, we never delay an ACK when we have any of those 472 * going out. */ 473 opts->mss = tcp_advertise_mss(sk); 474 size += TCPOLEN_MSS_ALIGNED; 475 476 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 477 opts->options |= OPTION_TS; 478 opts->tsval = TCP_SKB_CB(skb)->when; 479 opts->tsecr = tp->rx_opt.ts_recent; 480 size += TCPOLEN_TSTAMP_ALIGNED; 481 } 482 if (likely(sysctl_tcp_window_scaling)) { 483 opts->ws = tp->rx_opt.rcv_wscale; 484 if (likely(opts->ws)) 485 size += TCPOLEN_WSCALE_ALIGNED; 486 } 487 if (likely(sysctl_tcp_sack)) { 488 opts->options |= OPTION_SACK_ADVERTISE; 489 if (unlikely(!(OPTION_TS & opts->options))) 490 size += TCPOLEN_SACKPERM_ALIGNED; 491 } 492 493 return size; 494} 495 496static unsigned tcp_synack_options(struct sock *sk, 497 struct request_sock *req, 498 unsigned mss, struct sk_buff *skb, 499 struct tcp_out_options *opts, 500 struct tcp_md5sig_key **md5) { 501 unsigned size = 0; 502 struct inet_request_sock *ireq = inet_rsk(req); 503 char doing_ts; 504 505#ifdef CONFIG_TCP_MD5SIG 506 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 507 if (*md5) { 508 opts->options |= OPTION_MD5; 509 size += TCPOLEN_MD5SIG_ALIGNED; 510 } 511#else 512 *md5 = NULL; 513#endif 514 515 /* we can't fit any SACK blocks in a packet with MD5 + TS 516 options. There was discussion about disabling SACK rather than TS in 517 order to fit in better with old, buggy kernels, but that was deemed 518 to be unnecessary. */ 519 doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok); 520 521 opts->mss = mss; 522 size += TCPOLEN_MSS_ALIGNED; 523 524 if (likely(ireq->wscale_ok)) { 525 opts->ws = ireq->rcv_wscale; 526 if (likely(opts->ws)) 527 size += TCPOLEN_WSCALE_ALIGNED; 528 } 529 if (likely(doing_ts)) { 530 opts->options |= OPTION_TS; 531 opts->tsval = TCP_SKB_CB(skb)->when; 532 opts->tsecr = req->ts_recent; 533 size += TCPOLEN_TSTAMP_ALIGNED; 534 } 535 if (likely(ireq->sack_ok)) { 536 opts->options |= OPTION_SACK_ADVERTISE; 537 if (unlikely(!doing_ts)) 538 size += TCPOLEN_SACKPERM_ALIGNED; 539 } 540 541 return size; 542} 543 544static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 545 struct tcp_out_options *opts, 546 struct tcp_md5sig_key **md5) { 547 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 548 struct tcp_sock *tp = tcp_sk(sk); 549 unsigned size = 0; 550 unsigned int eff_sacks; 551 552#ifdef CONFIG_TCP_MD5SIG 553 *md5 = tp->af_specific->md5_lookup(sk, sk); 554 if (unlikely(*md5)) { 555 opts->options |= OPTION_MD5; 556 size += TCPOLEN_MD5SIG_ALIGNED; 557 } 558#else 559 *md5 = NULL; 560#endif 561 562 if (likely(tp->rx_opt.tstamp_ok)) { 563 opts->options |= OPTION_TS; 564 opts->tsval = tcb ? tcb->when : 0; 565 opts->tsecr = tp->rx_opt.ts_recent; 566 size += TCPOLEN_TSTAMP_ALIGNED; 567 } 568 569 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 570 if (unlikely(eff_sacks)) { 571 const unsigned remaining = MAX_TCP_OPTION_SPACE - size; 572 opts->num_sack_blocks = 573 min_t(unsigned, eff_sacks, 574 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 575 TCPOLEN_SACK_PERBLOCK); 576 size += TCPOLEN_SACK_BASE_ALIGNED + 577 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 578 } 579 580 return size; 581} 582 583/* This routine actually transmits TCP packets queued in by 584 * tcp_do_sendmsg(). This is used by both the initial 585 * transmission and possible later retransmissions. 586 * All SKB's seen here are completely headerless. It is our 587 * job to build the TCP header, and pass the packet down to 588 * IP so it can do the same plus pass the packet off to the 589 * device. 590 * 591 * We are working here with either a clone of the original 592 * SKB, or a fresh unique copy made by the retransmit engine. 593 */ 594static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 595 gfp_t gfp_mask) 596{ 597 const struct inet_connection_sock *icsk = inet_csk(sk); 598 struct inet_sock *inet; 599 struct tcp_sock *tp; 600 struct tcp_skb_cb *tcb; 601 struct tcp_out_options opts; 602 unsigned tcp_options_size, tcp_header_size; 603 struct tcp_md5sig_key *md5; 604 __u8 *md5_hash_location; 605 struct tcphdr *th; 606 int err; 607 608 BUG_ON(!skb || !tcp_skb_pcount(skb)); 609 610 /* If congestion control is doing timestamping, we must 611 * take such a timestamp before we potentially clone/copy. 612 */ 613 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) 614 __net_timestamp(skb); 615 616 if (likely(clone_it)) { 617 if (unlikely(skb_cloned(skb))) 618 skb = pskb_copy(skb, gfp_mask); 619 else 620 skb = skb_clone(skb, gfp_mask); 621 if (unlikely(!skb)) 622 return -ENOBUFS; 623 } 624 625 inet = inet_sk(sk); 626 tp = tcp_sk(sk); 627 tcb = TCP_SKB_CB(skb); 628 memset(&opts, 0, sizeof(opts)); 629 630 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) 631 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 632 else 633 tcp_options_size = tcp_established_options(sk, skb, &opts, 634 &md5); 635 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 636 637 if (tcp_packets_in_flight(tp) == 0) 638 tcp_ca_event(sk, CA_EVENT_TX_START); 639 640 skb_push(skb, tcp_header_size); 641 skb_reset_transport_header(skb); 642 skb_set_owner_w(skb, sk); 643 644 /* Build TCP header and checksum it. */ 645 th = tcp_hdr(skb); 646 th->source = inet->sport; 647 th->dest = inet->dport; 648 th->seq = htonl(tcb->seq); 649 th->ack_seq = htonl(tp->rcv_nxt); 650 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 651 tcb->flags); 652 653 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { 654 /* RFC1323: The window in SYN & SYN/ACK segments 655 * is never scaled. 656 */ 657 th->window = htons(min(tp->rcv_wnd, 65535U)); 658 } else { 659 th->window = htons(tcp_select_window(sk)); 660 } 661 th->check = 0; 662 th->urg_ptr = 0; 663 664 /* The urg_mode check is necessary during a below snd_una win probe */ 665 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 666 if (before(tp->snd_up, tcb->seq + 0x10000)) { 667 th->urg_ptr = htons(tp->snd_up - tcb->seq); 668 th->urg = 1; 669 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 670 th->urg_ptr = 0xFFFF; 671 th->urg = 1; 672 } 673 } 674 675 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 676 if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) 677 TCP_ECN_send(sk, skb, tcp_header_size); 678 679#ifdef CONFIG_TCP_MD5SIG 680 /* Calculate the MD5 hash, as we have all we need now */ 681 if (md5) { 682 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 683 tp->af_specific->calc_md5_hash(md5_hash_location, 684 md5, sk, NULL, skb); 685 } 686#endif 687 688 icsk->icsk_af_ops->send_check(sk, skb->len, skb); 689 690 if (likely(tcb->flags & TCPCB_FLAG_ACK)) 691 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 692 693 if (skb->len != tcp_header_size) 694 tcp_event_data_sent(tp, skb, sk); 695 696 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 697 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 698 699 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 700 if (likely(err <= 0)) 701 return err; 702 703 tcp_enter_cwr(sk, 1); 704 705 return net_xmit_eval(err); 706} 707 708/* This routine just queue's the buffer 709 * 710 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 711 * otherwise socket can stall. 712 */ 713static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 714{ 715 struct tcp_sock *tp = tcp_sk(sk); 716 717 /* Advance write_seq and place onto the write_queue. */ 718 tp->write_seq = TCP_SKB_CB(skb)->end_seq; 719 skb_header_release(skb); 720 tcp_add_write_queue_tail(sk, skb); 721 sk->sk_wmem_queued += skb->truesize; 722 sk_mem_charge(sk, skb->truesize); 723} 724 725static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, 726 unsigned int mss_now) 727{ 728 if (skb->len <= mss_now || !sk_can_gso(sk)) { 729 /* Avoid the costly divide in the normal 730 * non-TSO case. 731 */ 732 skb_shinfo(skb)->gso_segs = 1; 733 skb_shinfo(skb)->gso_size = 0; 734 skb_shinfo(skb)->gso_type = 0; 735 } else { 736 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); 737 skb_shinfo(skb)->gso_size = mss_now; 738 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 739 } 740} 741 742/* When a modification to fackets out becomes necessary, we need to check 743 * skb is counted to fackets_out or not. 744 */ 745static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb, 746 int decr) 747{ 748 struct tcp_sock *tp = tcp_sk(sk); 749 750 if (!tp->sacked_out || tcp_is_reno(tp)) 751 return; 752 753 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 754 tp->fackets_out -= decr; 755} 756 757/* Function to create two new TCP segments. Shrinks the given segment 758 * to the specified size and appends a new segment with the rest of the 759 * packet to the list. This won't be called frequently, I hope. 760 * Remember, these are still headerless SKBs at this point. 761 */ 762int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 763 unsigned int mss_now) 764{ 765 struct tcp_sock *tp = tcp_sk(sk); 766 struct sk_buff *buff; 767 int nsize, old_factor; 768 int nlen; 769 u8 flags; 770 771 BUG_ON(len > skb->len); 772 773 nsize = skb_headlen(skb) - len; 774 if (nsize < 0) 775 nsize = 0; 776 777 if (skb_cloned(skb) && 778 skb_is_nonlinear(skb) && 779 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 780 return -ENOMEM; 781 782 /* Get a new skb... force flag on. */ 783 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 784 if (buff == NULL) 785 return -ENOMEM; /* We'll just try again later. */ 786 787 sk->sk_wmem_queued += buff->truesize; 788 sk_mem_charge(sk, buff->truesize); 789 nlen = skb->len - len - nsize; 790 buff->truesize += nlen; 791 skb->truesize -= nlen; 792 793 /* Correct the sequence numbers. */ 794 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 795 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 796 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 797 798 /* PSH and FIN should only be set in the second packet. */ 799 flags = TCP_SKB_CB(skb)->flags; 800 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 801 TCP_SKB_CB(buff)->flags = flags; 802 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 803 804 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 805 /* Copy and checksum data tail into the new buffer. */ 806 buff->csum = csum_partial_copy_nocheck(skb->data + len, 807 skb_put(buff, nsize), 808 nsize, 0); 809 810 skb_trim(skb, len); 811 812 skb->csum = csum_block_sub(skb->csum, buff->csum, len); 813 } else { 814 skb->ip_summed = CHECKSUM_PARTIAL; 815 skb_split(skb, buff, len); 816 } 817 818 buff->ip_summed = skb->ip_summed; 819 820 /* Looks stupid, but our code really uses when of 821 * skbs, which it never sent before. --ANK 822 */ 823 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 824 buff->tstamp = skb->tstamp; 825 826 old_factor = tcp_skb_pcount(skb); 827 828 /* Fix up tso_factor for both original and new SKB. */ 829 tcp_set_skb_tso_segs(sk, skb, mss_now); 830 tcp_set_skb_tso_segs(sk, buff, mss_now); 831 832 /* If this packet has been sent out already, we must 833 * adjust the various packet counters. 834 */ 835 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 836 int diff = old_factor - tcp_skb_pcount(skb) - 837 tcp_skb_pcount(buff); 838 839 tp->packets_out -= diff; 840 841 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 842 tp->sacked_out -= diff; 843 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 844 tp->retrans_out -= diff; 845 846 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 847 tp->lost_out -= diff; 848 849 /* Adjust Reno SACK estimate. */ 850 if (tcp_is_reno(tp) && diff > 0) { 851 tcp_dec_pcount_approx_int(&tp->sacked_out, diff); 852 tcp_verify_left_out(tp); 853 } 854 tcp_adjust_fackets_out(sk, skb, diff); 855 856 if (tp->lost_skb_hint && 857 before(TCP_SKB_CB(skb)->seq, 858 TCP_SKB_CB(tp->lost_skb_hint)->seq) && 859 (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked)) 860 tp->lost_cnt_hint -= diff; 861 } 862 863 /* Link BUFF into the send queue. */ 864 skb_header_release(buff); 865 tcp_insert_write_queue_after(skb, buff, sk); 866 867 return 0; 868} 869 870/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c 871 * eventually). The difference is that pulled data not copied, but 872 * immediately discarded. 873 */ 874static void __pskb_trim_head(struct sk_buff *skb, int len) 875{ 876 int i, k, eat; 877 878 eat = len; 879 k = 0; 880 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 881 if (skb_shinfo(skb)->frags[i].size <= eat) { 882 put_page(skb_shinfo(skb)->frags[i].page); 883 eat -= skb_shinfo(skb)->frags[i].size; 884 } else { 885 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 886 if (eat) { 887 skb_shinfo(skb)->frags[k].page_offset += eat; 888 skb_shinfo(skb)->frags[k].size -= eat; 889 eat = 0; 890 } 891 k++; 892 } 893 } 894 skb_shinfo(skb)->nr_frags = k; 895 896 skb_reset_tail_pointer(skb); 897 skb->data_len -= len; 898 skb->len = skb->data_len; 899} 900 901int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 902{ 903 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 904 return -ENOMEM; 905 906 /* If len == headlen, we avoid __skb_pull to preserve alignment. */ 907 if (unlikely(len < skb_headlen(skb))) 908 __skb_pull(skb, len); 909 else 910 __pskb_trim_head(skb, len - skb_headlen(skb)); 911 912 TCP_SKB_CB(skb)->seq += len; 913 skb->ip_summed = CHECKSUM_PARTIAL; 914 915 skb->truesize -= len; 916 sk->sk_wmem_queued -= len; 917 sk_mem_uncharge(sk, len); 918 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 919 920 /* Any change of skb->len requires recalculation of tso 921 * factor and mss. 922 */ 923 if (tcp_skb_pcount(skb) > 1) 924 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk)); 925 926 return 0; 927} 928 929/* Not accounting for SACKs here. */ 930int tcp_mtu_to_mss(struct sock *sk, int pmtu) 931{ 932 struct tcp_sock *tp = tcp_sk(sk); 933 struct inet_connection_sock *icsk = inet_csk(sk); 934 int mss_now; 935 936 /* Calculate base mss without TCP options: 937 It is MMS_S - sizeof(tcphdr) of rfc1122 938 */ 939 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 940 941 /* Clamp it (mss_clamp does not include tcp options) */ 942 if (mss_now > tp->rx_opt.mss_clamp) 943 mss_now = tp->rx_opt.mss_clamp; 944 945 /* Now subtract optional transport overhead */ 946 mss_now -= icsk->icsk_ext_hdr_len; 947 948 /* Then reserve room for full set of TCP options and 8 bytes of data */ 949 if (mss_now < 48) 950 mss_now = 48; 951 952 /* Now subtract TCP options size, not including SACKs */ 953 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); 954 955 return mss_now; 956} 957 958/* Inverse of above */ 959int tcp_mss_to_mtu(struct sock *sk, int mss) 960{ 961 struct tcp_sock *tp = tcp_sk(sk); 962 struct inet_connection_sock *icsk = inet_csk(sk); 963 int mtu; 964 965 mtu = mss + 966 tp->tcp_header_len + 967 icsk->icsk_ext_hdr_len + 968 icsk->icsk_af_ops->net_header_len; 969 970 return mtu; 971} 972 973void tcp_mtup_init(struct sock *sk) 974{ 975 struct tcp_sock *tp = tcp_sk(sk); 976 struct inet_connection_sock *icsk = inet_csk(sk); 977 978 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 979 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 980 icsk->icsk_af_ops->net_header_len; 981 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 982 icsk->icsk_mtup.probe_size = 0; 983} 984 985/* This function synchronize snd mss to current pmtu/exthdr set. 986 987 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts 988 for TCP options, but includes only bare TCP header. 989 990 tp->rx_opt.mss_clamp is mss negotiated at connection setup. 991 It is minimum of user_mss and mss received with SYN. 992 It also does not include TCP options. 993 994 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 995 996 tp->mss_cache is current effective sending mss, including 997 all tcp options except for SACKs. It is evaluated, 998 taking into account current pmtu, but never exceeds 999 tp->rx_opt.mss_clamp. 1000 1001 NOTE1. rfc1122 clearly states that advertised MSS 1002 DOES NOT include either tcp or ip options. 1003 1004 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1005 are READ ONLY outside this function. --ANK (980731) 1006 */ 1007unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 1008{ 1009 struct tcp_sock *tp = tcp_sk(sk); 1010 struct inet_connection_sock *icsk = inet_csk(sk); 1011 int mss_now; 1012 1013 if (icsk->icsk_mtup.search_high > pmtu) 1014 icsk->icsk_mtup.search_high = pmtu; 1015 1016 mss_now = tcp_mtu_to_mss(sk, pmtu); 1017 mss_now = tcp_bound_to_half_wnd(tp, mss_now); 1018 1019 /* And store cached results */ 1020 icsk->icsk_pmtu_cookie = pmtu; 1021 if (icsk->icsk_mtup.enabled) 1022 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1023 tp->mss_cache = mss_now; 1024 1025 return mss_now; 1026} 1027 1028/* Compute the current effective MSS, taking SACKs and IP options, 1029 * and even PMTU discovery events into account. 1030 */ 1031unsigned int tcp_current_mss(struct sock *sk) 1032{ 1033 struct tcp_sock *tp = tcp_sk(sk); 1034 struct dst_entry *dst = __sk_dst_get(sk); 1035 u32 mss_now; 1036 unsigned header_len; 1037 struct tcp_out_options opts; 1038 struct tcp_md5sig_key *md5; 1039 1040 mss_now = tp->mss_cache; 1041 1042 if (dst) { 1043 u32 mtu = dst_mtu(dst); 1044 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 1045 mss_now = tcp_sync_mss(sk, mtu); 1046 } 1047 1048 header_len = tcp_established_options(sk, NULL, &opts, &md5) + 1049 sizeof(struct tcphdr); 1050 /* The mss_cache is sized based on tp->tcp_header_len, which assumes 1051 * some common options. If this is an odd packet (because we have SACK 1052 * blocks etc) then our calculated header_len will be different, and 1053 * we have to adjust mss_now correspondingly */ 1054 if (header_len != tp->tcp_header_len) { 1055 int delta = (int) header_len - tp->tcp_header_len; 1056 mss_now -= delta; 1057 } 1058 1059 return mss_now; 1060} 1061 1062/* Congestion window validation. (RFC2861) */ 1063static void tcp_cwnd_validate(struct sock *sk) 1064{ 1065 struct tcp_sock *tp = tcp_sk(sk); 1066 1067 if (tp->packets_out >= tp->snd_cwnd) { 1068 /* Network is feed fully. */ 1069 tp->snd_cwnd_used = 0; 1070 tp->snd_cwnd_stamp = tcp_time_stamp; 1071 } else { 1072 /* Network starves. */ 1073 if (tp->packets_out > tp->snd_cwnd_used) 1074 tp->snd_cwnd_used = tp->packets_out; 1075 1076 if (sysctl_tcp_slow_start_after_idle && 1077 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1078 tcp_cwnd_application_limited(sk); 1079 } 1080} 1081 1082/* Returns the portion of skb which can be sent right away without 1083 * introducing MSS oddities to segment boundaries. In rare cases where 1084 * mss_now != mss_cache, we will request caller to create a small skb 1085 * per input skb which could be mostly avoided here (if desired). 1086 * 1087 * We explicitly want to create a request for splitting write queue tail 1088 * to a small skb for Nagle purposes while avoiding unnecessary modulos, 1089 * thus all the complexity (cwnd_len is always MSS multiple which we 1090 * return whenever allowed by the other factors). Basically we need the 1091 * modulo only when the receiver window alone is the limiting factor or 1092 * when we would be allowed to send the split-due-to-Nagle skb fully. 1093 */ 1094static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1095 unsigned int mss_now, unsigned int cwnd) 1096{ 1097 struct tcp_sock *tp = tcp_sk(sk); 1098 u32 needed, window, cwnd_len; 1099 1100 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1101 cwnd_len = mss_now * cwnd; 1102 1103 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1104 return cwnd_len; 1105 1106 needed = min(skb->len, window); 1107 1108 if (cwnd_len <= needed) 1109 return cwnd_len; 1110 1111 return needed - needed % mss_now; 1112} 1113 1114/* Can at least one segment of SKB be sent right now, according to the 1115 * congestion window rules? If so, return how many segments are allowed. 1116 */ 1117static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1118 struct sk_buff *skb) 1119{ 1120 u32 in_flight, cwnd; 1121 1122 /* Don't be strict about the congestion window for the final FIN. */ 1123 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1124 tcp_skb_pcount(skb) == 1) 1125 return 1; 1126 1127 in_flight = tcp_packets_in_flight(tp); 1128 cwnd = tp->snd_cwnd; 1129 if (in_flight < cwnd) 1130 return (cwnd - in_flight); 1131 1132 return 0; 1133} 1134 1135/* This must be invoked the first time we consider transmitting 1136 * SKB onto the wire. 1137 */ 1138static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, 1139 unsigned int mss_now) 1140{ 1141 int tso_segs = tcp_skb_pcount(skb); 1142 1143 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1144 tcp_set_skb_tso_segs(sk, skb, mss_now); 1145 tso_segs = tcp_skb_pcount(skb); 1146 } 1147 return tso_segs; 1148} 1149 1150static inline int tcp_minshall_check(const struct tcp_sock *tp) 1151{ 1152 return after(tp->snd_sml, tp->snd_una) && 1153 !after(tp->snd_sml, tp->snd_nxt); 1154} 1155 1156/* Return 0, if packet can be sent now without violation Nagle's rules: 1157 * 1. It is full sized. 1158 * 2. Or it contains FIN. (already checked by caller) 1159 * 3. Or TCP_NODELAY was set. 1160 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1161 * With Minshall's modification: all sent small packets are ACKed. 1162 */ 1163static inline int tcp_nagle_check(const struct tcp_sock *tp, 1164 const struct sk_buff *skb, 1165 unsigned mss_now, int nonagle) 1166{ 1167 return (skb->len < mss_now && 1168 ((nonagle & TCP_NAGLE_CORK) || 1169 (!nonagle && tp->packets_out && tcp_minshall_check(tp)))); 1170} 1171 1172/* Return non-zero if the Nagle test allows this packet to be 1173 * sent now. 1174 */ 1175static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1176 unsigned int cur_mss, int nonagle) 1177{ 1178 /* Nagle rule does not apply to frames, which sit in the middle of the 1179 * write_queue (they have no chances to get new data). 1180 * 1181 * This is implemented in the callers, where they modify the 'nonagle' 1182 * argument based upon the location of SKB in the send queue. 1183 */ 1184 if (nonagle & TCP_NAGLE_PUSH) 1185 return 1; 1186 1187 /* Don't use the nagle rule for urgent data (or for the final FIN). 1188 * Nagle can be ignored during F-RTO too (see RFC4138). 1189 */ 1190 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1191 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) 1192 return 1; 1193 1194 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1195 return 1; 1196 1197 return 0; 1198} 1199 1200/* Does at least the first segment of SKB fit into the send window? */ 1201static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, 1202 unsigned int cur_mss) 1203{ 1204 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1205 1206 if (skb->len > cur_mss) 1207 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1208 1209 return !after(end_seq, tcp_wnd_end(tp)); 1210} 1211 1212/* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1213 * should be put on the wire right now. If so, it returns the number of 1214 * packets allowed by the congestion window. 1215 */ 1216static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 1217 unsigned int cur_mss, int nonagle) 1218{ 1219 struct tcp_sock *tp = tcp_sk(sk); 1220 unsigned int cwnd_quota; 1221 1222 tcp_init_tso_segs(sk, skb, cur_mss); 1223 1224 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1225 return 0; 1226 1227 cwnd_quota = tcp_cwnd_test(tp, skb); 1228 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1229 cwnd_quota = 0; 1230 1231 return cwnd_quota; 1232} 1233 1234int tcp_may_send_now(struct sock *sk) 1235{ 1236 struct tcp_sock *tp = tcp_sk(sk); 1237 struct sk_buff *skb = tcp_send_head(sk); 1238 1239 return (skb && 1240 tcp_snd_test(sk, skb, tcp_current_mss(sk), 1241 (tcp_skb_is_last(sk, skb) ? 1242 tp->nonagle : TCP_NAGLE_PUSH))); 1243} 1244 1245/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1246 * which is put after SKB on the list. It is very much like 1247 * tcp_fragment() except that it may make several kinds of assumptions 1248 * in order to speed up the splitting operation. In particular, we 1249 * know that all the data is in scatter-gather pages, and that the 1250 * packet has never been sent out before (and thus is not cloned). 1251 */ 1252static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1253 unsigned int mss_now) 1254{ 1255 struct sk_buff *buff; 1256 int nlen = skb->len - len; 1257 u8 flags; 1258 1259 /* All of a TSO frame must be composed of paged data. */ 1260 if (skb->len != skb->data_len) 1261 return tcp_fragment(sk, skb, len, mss_now); 1262 1263 buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC); 1264 if (unlikely(buff == NULL)) 1265 return -ENOMEM; 1266 1267 sk->sk_wmem_queued += buff->truesize; 1268 sk_mem_charge(sk, buff->truesize); 1269 buff->truesize += nlen; 1270 skb->truesize -= nlen; 1271 1272 /* Correct the sequence numbers. */ 1273 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1274 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1275 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1276 1277 /* PSH and FIN should only be set in the second packet. */ 1278 flags = TCP_SKB_CB(skb)->flags; 1279 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 1280 TCP_SKB_CB(buff)->flags = flags; 1281 1282 /* This packet was never sent out yet, so no SACK bits. */ 1283 TCP_SKB_CB(buff)->sacked = 0; 1284 1285 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1286 skb_split(skb, buff, len); 1287 1288 /* Fix up tso_factor for both original and new SKB. */ 1289 tcp_set_skb_tso_segs(sk, skb, mss_now); 1290 tcp_set_skb_tso_segs(sk, buff, mss_now); 1291 1292 /* Link BUFF into the send queue. */ 1293 skb_header_release(buff); 1294 tcp_insert_write_queue_after(skb, buff, sk); 1295 1296 return 0; 1297} 1298 1299/* Try to defer sending, if possible, in order to minimize the amount 1300 * of TSO splitting we do. View it as a kind of TSO Nagle test. 1301 * 1302 * This algorithm is from John Heffner. 1303 */ 1304static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1305{ 1306 struct tcp_sock *tp = tcp_sk(sk); 1307 const struct inet_connection_sock *icsk = inet_csk(sk); 1308 u32 send_win, cong_win, limit, in_flight; 1309 1310 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1311 goto send_now; 1312 1313 if (icsk->icsk_ca_state != TCP_CA_Open) 1314 goto send_now; 1315 1316 /* Defer for less than two clock ticks. */ 1317 if (tp->tso_deferred && 1318 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1319 goto send_now; 1320 1321 in_flight = tcp_packets_in_flight(tp); 1322 1323 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1324 1325 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1326 1327 /* From in_flight test above, we know that cwnd > in_flight. */ 1328 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1329 1330 limit = min(send_win, cong_win); 1331 1332 /* If a full-sized TSO skb can be sent, do it. */ 1333 if (limit >= sk->sk_gso_max_size) 1334 goto send_now; 1335 1336 /* Middle in queue won't get any more data, full sendable already? */ 1337 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1338 goto send_now; 1339 1340 if (sysctl_tcp_tso_win_divisor) { 1341 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1342 1343 /* If at least some fraction of a window is available, 1344 * just use it. 1345 */ 1346 chunk /= sysctl_tcp_tso_win_divisor; 1347 if (limit >= chunk) 1348 goto send_now; 1349 } else { 1350 /* Different approach, try not to defer past a single 1351 * ACK. Receiver should ACK every other full sized 1352 * frame, so if we have space for more than 3 frames 1353 * then send now. 1354 */ 1355 if (limit > tcp_max_burst(tp) * tp->mss_cache) 1356 goto send_now; 1357 } 1358 1359 /* Ok, it looks like it is advisable to defer. */ 1360 tp->tso_deferred = 1 | (jiffies << 1); 1361 1362 return 1; 1363 1364send_now: 1365 tp->tso_deferred = 0; 1366 return 0; 1367} 1368 1369/* Create a new MTU probe if we are ready. 1370 * Returns 0 if we should wait to probe (no cwnd available), 1371 * 1 if a probe was sent, 1372 * -1 otherwise 1373 */ 1374static int tcp_mtu_probe(struct sock *sk) 1375{ 1376 struct tcp_sock *tp = tcp_sk(sk); 1377 struct inet_connection_sock *icsk = inet_csk(sk); 1378 struct sk_buff *skb, *nskb, *next; 1379 int len; 1380 int probe_size; 1381 int size_needed; 1382 int copy; 1383 int mss_now; 1384 1385 /* Not currently probing/verifying, 1386 * not in recovery, 1387 * have enough cwnd, and 1388 * not SACKing (the variable headers throw things off) */ 1389 if (!icsk->icsk_mtup.enabled || 1390 icsk->icsk_mtup.probe_size || 1391 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 1392 tp->snd_cwnd < 11 || 1393 tp->rx_opt.num_sacks || tp->rx_opt.dsack) 1394 return -1; 1395 1396 /* Very simple search strategy: just double the MSS. */ 1397 mss_now = tcp_current_mss(sk); 1398 probe_size = 2 * tp->mss_cache; 1399 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 1400 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 1401 /* TODO: set timer for probe_converge_event */ 1402 return -1; 1403 } 1404 1405 /* Have enough data in the send queue to probe? */ 1406 if (tp->write_seq - tp->snd_nxt < size_needed) 1407 return -1; 1408 1409 if (tp->snd_wnd < size_needed) 1410 return -1; 1411 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 1412 return 0; 1413 1414 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1415 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1416 if (!tcp_packets_in_flight(tp)) 1417 return -1; 1418 else 1419 return 0; 1420 } 1421 1422 /* We're allowed to probe. Build it now. */ 1423 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 1424 return -1; 1425 sk->sk_wmem_queued += nskb->truesize; 1426 sk_mem_charge(sk, nskb->truesize); 1427 1428 skb = tcp_send_head(sk); 1429 1430 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1431 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1432 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; 1433 TCP_SKB_CB(nskb)->sacked = 0; 1434 nskb->csum = 0; 1435 nskb->ip_summed = skb->ip_summed; 1436 1437 tcp_insert_write_queue_before(nskb, skb, sk); 1438 1439 len = 0; 1440 tcp_for_write_queue_from_safe(skb, next, sk) { 1441 copy = min_t(int, skb->len, probe_size - len); 1442 if (nskb->ip_summed) 1443 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1444 else 1445 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1446 skb_put(nskb, copy), 1447 copy, nskb->csum); 1448 1449 if (skb->len <= copy) { 1450 /* We've eaten all the data from this skb. 1451 * Throw it away. */ 1452 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; 1453 tcp_unlink_write_queue(skb, sk); 1454 sk_wmem_free_skb(sk, skb); 1455 } else { 1456 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 1457 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 1458 if (!skb_shinfo(skb)->nr_frags) { 1459 skb_pull(skb, copy); 1460 if (skb->ip_summed != CHECKSUM_PARTIAL) 1461 skb->csum = csum_partial(skb->data, 1462 skb->len, 0); 1463 } else { 1464 __pskb_trim_head(skb, copy); 1465 tcp_set_skb_tso_segs(sk, skb, mss_now); 1466 } 1467 TCP_SKB_CB(skb)->seq += copy; 1468 } 1469 1470 len += copy; 1471 1472 if (len >= probe_size) 1473 break; 1474 } 1475 tcp_init_tso_segs(sk, nskb, nskb->len); 1476 1477 /* We're ready to send. If this fails, the probe will 1478 * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 1479 TCP_SKB_CB(nskb)->when = tcp_time_stamp; 1480 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1481 /* Decrement cwnd here because we are sending 1482 * effectively two packets. */ 1483 tp->snd_cwnd--; 1484 tcp_event_new_data_sent(sk, nskb); 1485 1486 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1487 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 1488 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 1489 1490 return 1; 1491 } 1492 1493 return -1; 1494} 1495 1496/* This routine writes packets to the network. It advances the 1497 * send_head. This happens as incoming acks open up the remote 1498 * window for us. 1499 * 1500 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1501 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1502 * account rare use of URG, this is not a big flaw. 1503 * 1504 * Returns 1, if no segments are in flight and we have queued segments, but 1505 * cannot send anything now because of SWS or another problem. 1506 */ 1507static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1508 int push_one, gfp_t gfp) 1509{ 1510 struct tcp_sock *tp = tcp_sk(sk); 1511 struct sk_buff *skb; 1512 unsigned int tso_segs, sent_pkts; 1513 int cwnd_quota; 1514 int result; 1515 1516 sent_pkts = 0; 1517 1518 if (!push_one) { 1519 /* Do MTU probing. */ 1520 result = tcp_mtu_probe(sk); 1521 if (!result) { 1522 return 0; 1523 } else if (result > 0) { 1524 sent_pkts = 1; 1525 } 1526 } 1527 1528 while ((skb = tcp_send_head(sk))) { 1529 unsigned int limit; 1530 1531 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1532 BUG_ON(!tso_segs); 1533 1534 cwnd_quota = tcp_cwnd_test(tp, skb); 1535 if (!cwnd_quota) 1536 break; 1537 1538 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1539 break; 1540 1541 if (tso_segs == 1) { 1542 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1543 (tcp_skb_is_last(sk, skb) ? 1544 nonagle : TCP_NAGLE_PUSH)))) 1545 break; 1546 } else { 1547 if (!push_one && tcp_tso_should_defer(sk, skb)) 1548 break; 1549 } 1550 1551 limit = mss_now; 1552 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1553 limit = tcp_mss_split_point(sk, skb, mss_now, 1554 cwnd_quota); 1555 1556 if (skb->len > limit && 1557 unlikely(tso_fragment(sk, skb, limit, mss_now))) 1558 break; 1559 1560 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1561 1562 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 1563 break; 1564 1565 /* Advance the send_head. This one is sent out. 1566 * This call will increment packets_out. 1567 */ 1568 tcp_event_new_data_sent(sk, skb); 1569 1570 tcp_minshall_update(tp, mss_now, skb); 1571 sent_pkts++; 1572 1573 if (push_one) 1574 break; 1575 } 1576 1577 if (likely(sent_pkts)) { 1578 tcp_cwnd_validate(sk); 1579 return 0; 1580 } 1581 return !tp->packets_out && tcp_send_head(sk); 1582} 1583 1584/* Push out any pending frames which were held back due to 1585 * TCP_CORK or attempt at coalescing tiny packets. 1586 * The socket must be locked by the caller. 1587 */ 1588void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1589 int nonagle) 1590{ 1591 struct sk_buff *skb = tcp_send_head(sk); 1592 1593 if (!skb) 1594 return; 1595 1596 /* If we are closed, the bytes will have to remain here. 1597 * In time closedown will finish, we empty the write queue and 1598 * all will be happy. 1599 */ 1600 if (unlikely(sk->sk_state == TCP_CLOSE)) 1601 return; 1602 1603 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC)) 1604 tcp_check_probe_timer(sk); 1605} 1606 1607/* Send _single_ skb sitting at the send head. This function requires 1608 * true push pending frames to setup probe timer etc. 1609 */ 1610void tcp_push_one(struct sock *sk, unsigned int mss_now) 1611{ 1612 struct sk_buff *skb = tcp_send_head(sk); 1613 1614 BUG_ON(!skb || skb->len < mss_now); 1615 1616 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 1617} 1618 1619/* This function returns the amount that we can raise the 1620 * usable window based on the following constraints 1621 * 1622 * 1. The window can never be shrunk once it is offered (RFC 793) 1623 * 2. We limit memory per socket 1624 * 1625 * RFC 1122: 1626 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 1627 * RECV.NEXT + RCV.WIN fixed until: 1628 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 1629 * 1630 * i.e. don't raise the right edge of the window until you can raise 1631 * it at least MSS bytes. 1632 * 1633 * Unfortunately, the recommended algorithm breaks header prediction, 1634 * since header prediction assumes th->window stays fixed. 1635 * 1636 * Strictly speaking, keeping th->window fixed violates the receiver 1637 * side SWS prevention criteria. The problem is that under this rule 1638 * a stream of single byte packets will cause the right side of the 1639 * window to always advance by a single byte. 1640 * 1641 * Of course, if the sender implements sender side SWS prevention 1642 * then this will not be a problem. 1643 * 1644 * BSD seems to make the following compromise: 1645 * 1646 * If the free space is less than the 1/4 of the maximum 1647 * space available and the free space is less than 1/2 mss, 1648 * then set the window to 0. 1649 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 1650 * Otherwise, just prevent the window from shrinking 1651 * and from being larger than the largest representable value. 1652 * 1653 * This prevents incremental opening of the window in the regime 1654 * where TCP is limited by the speed of the reader side taking 1655 * data out of the TCP receive queue. It does nothing about 1656 * those cases where the window is constrained on the sender side 1657 * because the pipeline is full. 1658 * 1659 * BSD also seems to "accidentally" limit itself to windows that are a 1660 * multiple of MSS, at least until the free space gets quite small. 1661 * This would appear to be a side effect of the mbuf implementation. 1662 * Combining these two algorithms results in the observed behavior 1663 * of having a fixed window size at almost all times. 1664 * 1665 * Below we obtain similar behavior by forcing the offered window to 1666 * a multiple of the mss when it is feasible to do so. 1667 * 1668 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 1669 * Regular options like TIMESTAMP are taken into account. 1670 */ 1671u32 __tcp_select_window(struct sock *sk) 1672{ 1673 struct inet_connection_sock *icsk = inet_csk(sk); 1674 struct tcp_sock *tp = tcp_sk(sk); 1675 /* MSS for the peer's data. Previous versions used mss_clamp 1676 * here. I don't know if the value based on our guesses 1677 * of peer's MSS is better for the performance. It's more correct 1678 * but may be worse for the performance because of rcv_mss 1679 * fluctuations. --SAW 1998/11/1 1680 */ 1681 int mss = icsk->icsk_ack.rcv_mss; 1682 int free_space = tcp_space(sk); 1683 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1684 int window; 1685 1686 if (mss > full_space) 1687 mss = full_space; 1688 1689 if (free_space < (full_space >> 1)) { 1690 icsk->icsk_ack.quick = 0; 1691 1692 if (tcp_memory_pressure) 1693 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 1694 4U * tp->advmss); 1695 1696 if (free_space < mss) 1697 return 0; 1698 } 1699 1700 if (free_space > tp->rcv_ssthresh) 1701 free_space = tp->rcv_ssthresh; 1702 1703 /* Don't do rounding if we are using window scaling, since the 1704 * scaled window will not line up with the MSS boundary anyway. 1705 */ 1706 window = tp->rcv_wnd; 1707 if (tp->rx_opt.rcv_wscale) { 1708 window = free_space; 1709 1710 /* Advertise enough space so that it won't get scaled away. 1711 * Import case: prevent zero window announcement if 1712 * 1<<rcv_wscale > mss. 1713 */ 1714 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 1715 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 1716 << tp->rx_opt.rcv_wscale); 1717 } else { 1718 /* Get the largest window that is a nice multiple of mss. 1719 * Window clamp already applied above. 1720 * If our current window offering is within 1 mss of the 1721 * free space we just keep it. This prevents the divide 1722 * and multiply from happening most of the time. 1723 * We also don't do any window rounding when the free space 1724 * is too small. 1725 */ 1726 if (window <= free_space - mss || window > free_space) 1727 window = (free_space / mss) * mss; 1728 else if (mss == full_space && 1729 free_space > window + (full_space >> 1)) 1730 window = free_space; 1731 } 1732 1733 return window; 1734} 1735 1736/* Collapses two adjacent SKB's during retransmission. */ 1737static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 1738{ 1739 struct tcp_sock *tp = tcp_sk(sk); 1740 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 1741 int skb_size, next_skb_size; 1742 1743 skb_size = skb->len; 1744 next_skb_size = next_skb->len; 1745 1746 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 1747 1748 tcp_highest_sack_combine(sk, next_skb, skb); 1749 1750 tcp_unlink_write_queue(next_skb, sk); 1751 1752 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 1753 next_skb_size); 1754 1755 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 1756 skb->ip_summed = CHECKSUM_PARTIAL; 1757 1758 if (skb->ip_summed != CHECKSUM_PARTIAL) 1759 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 1760 1761 /* Update sequence range on original skb. */ 1762 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 1763 1764 /* Merge over control information. This moves PSH/FIN etc. over */ 1765 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags; 1766 1767 /* All done, get rid of second SKB and account for it so 1768 * packet counting does not break. 1769 */ 1770 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 1771 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS) 1772 tp->retrans_out -= tcp_skb_pcount(next_skb); 1773 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST) 1774 tp->lost_out -= tcp_skb_pcount(next_skb); 1775 /* Reno case is special. Sigh... */ 1776 if (tcp_is_reno(tp) && tp->sacked_out) 1777 tcp_dec_pcount_approx(&tp->sacked_out, next_skb); 1778 1779 tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb)); 1780 tp->packets_out -= tcp_skb_pcount(next_skb); 1781 1782 /* changed transmit queue under us so clear hints */ 1783 tcp_clear_retrans_hints_partial(tp); 1784 if (next_skb == tp->retransmit_skb_hint) 1785 tp->retransmit_skb_hint = skb; 1786 1787 sk_wmem_free_skb(sk, next_skb); 1788} 1789 1790static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 1791{ 1792 if (tcp_skb_pcount(skb) > 1) 1793 return 0; 1794 /* TODO: SACK collapsing could be used to remove this condition */ 1795 if (skb_shinfo(skb)->nr_frags != 0) 1796 return 0; 1797 if (skb_cloned(skb)) 1798 return 0; 1799 if (skb == tcp_send_head(sk)) 1800 return 0; 1801 /* Some heurestics for collapsing over SACK'd could be invented */ 1802 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1803 return 0; 1804 1805 return 1; 1806} 1807 1808static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 1809 int space) 1810{ 1811 struct tcp_sock *tp = tcp_sk(sk); 1812 struct sk_buff *skb = to, *tmp; 1813 int first = 1; 1814 1815 if (!sysctl_tcp_retrans_collapse) 1816 return; 1817 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) 1818 return; 1819 1820 tcp_for_write_queue_from_safe(skb, tmp, sk) { 1821 if (!tcp_can_collapse(sk, skb)) 1822 break; 1823 1824 space -= skb->len; 1825 1826 if (first) { 1827 first = 0; 1828 continue; 1829 } 1830 1831 if (space < 0) 1832 break; 1833 /* Punt if not enough space exists in the first SKB for 1834 * the data in the second 1835 */ 1836 if (skb->len > skb_tailroom(to)) 1837 break; 1838 1839 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 1840 break; 1841 1842 tcp_collapse_retrans(sk, to); 1843 } 1844} 1845 1846/* This retransmits one SKB. Policy decisions and retransmit queue 1847 * state updates are done by the caller. Returns non-zero if an 1848 * error occurred which prevented the send. 1849 */ 1850int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 1851{ 1852 struct tcp_sock *tp = tcp_sk(sk); 1853 struct inet_connection_sock *icsk = inet_csk(sk); 1854 unsigned int cur_mss; 1855 int err; 1856 1857 /* Inconslusive MTU probe */ 1858 if (icsk->icsk_mtup.probe_size) { 1859 icsk->icsk_mtup.probe_size = 0; 1860 } 1861 1862 /* Do not sent more than we queued. 1/4 is reserved for possible 1863 * copying overhead: fragmentation, tunneling, mangling etc. 1864 */ 1865 if (atomic_read(&sk->sk_wmem_alloc) > 1866 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 1867 return -EAGAIN; 1868 1869 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 1870 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1871 BUG(); 1872 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 1873 return -ENOMEM; 1874 } 1875 1876 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 1877 return -EHOSTUNREACH; /* Routing failure or similar. */ 1878 1879 cur_mss = tcp_current_mss(sk); 1880 1881 /* If receiver has shrunk his window, and skb is out of 1882 * new window, do not retransmit it. The exception is the 1883 * case, when window is shrunk to zero. In this case 1884 * our retransmit serves as a zero window probe. 1885 */ 1886 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) 1887 && TCP_SKB_CB(skb)->seq != tp->snd_una) 1888 return -EAGAIN; 1889 1890 if (skb->len > cur_mss) { 1891 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 1892 return -ENOMEM; /* We'll try again later. */ 1893 } else { 1894 tcp_init_tso_segs(sk, skb, cur_mss); 1895 } 1896 1897 tcp_retrans_try_collapse(sk, skb, cur_mss); 1898 1899 /* Some Solaris stacks overoptimize and ignore the FIN on a 1900 * retransmit when old data is attached. So strip it off 1901 * since it is cheap to do so and saves bytes on the network. 1902 */ 1903 if (skb->len > 0 && 1904 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1905 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 1906 if (!pskb_trim(skb, 0)) { 1907 /* Reuse, even though it does some unnecessary work */ 1908 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 1909 TCP_SKB_CB(skb)->flags); 1910 skb->ip_summed = CHECKSUM_NONE; 1911 } 1912 } 1913 1914 /* Make a copy, if the first transmission SKB clone we made 1915 * is still in somebody's hands, else make a clone. 1916 */ 1917 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1918 1919 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 1920 1921 if (err == 0) { 1922 /* Update global TCP statistics. */ 1923 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 1924 1925 tp->total_retrans++; 1926 1927#if FASTRETRANS_DEBUG > 0 1928 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 1929 if (net_ratelimit()) 1930 printk(KERN_DEBUG "retrans_out leaked.\n"); 1931 } 1932#endif 1933 if (!tp->retrans_out) 1934 tp->lost_retrans_low = tp->snd_nxt; 1935 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 1936 tp->retrans_out += tcp_skb_pcount(skb); 1937 1938 /* Save stamp of the first retransmit. */ 1939 if (!tp->retrans_stamp) 1940 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 1941 1942 tp->undo_retrans++; 1943 1944 /* snd_nxt is stored to detect loss of retransmitted segment, 1945 * see tcp_input.c tcp_sacktag_write_queue(). 1946 */ 1947 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 1948 } 1949 return err; 1950} 1951 1952static int tcp_can_forward_retransmit(struct sock *sk) 1953{ 1954 const struct inet_connection_sock *icsk = inet_csk(sk); 1955 struct tcp_sock *tp = tcp_sk(sk); 1956 1957 /* Forward retransmissions are possible only during Recovery. */ 1958 if (icsk->icsk_ca_state != TCP_CA_Recovery) 1959 return 0; 1960 1961 /* No forward retransmissions in Reno are possible. */ 1962 if (tcp_is_reno(tp)) 1963 return 0; 1964 1965 /* Yeah, we have to make difficult choice between forward transmission 1966 * and retransmission... Both ways have their merits... 1967 * 1968 * For now we do not retransmit anything, while we have some new 1969 * segments to send. In the other cases, follow rule 3 for 1970 * NextSeg() specified in RFC3517. 1971 */ 1972 1973 if (tcp_may_send_now(sk)) 1974 return 0; 1975 1976 return 1; 1977} 1978 1979/* This gets called after a retransmit timeout, and the initially 1980 * retransmitted data is acknowledged. It tries to continue 1981 * resending the rest of the retransmit queue, until either 1982 * we've sent it all or the congestion window limit is reached. 1983 * If doing SACK, the first ACK which comes back for a timeout 1984 * based retransmit packet might feed us FACK information again. 1985 * If so, we use it to avoid unnecessarily retransmissions. 1986 */ 1987void tcp_xmit_retransmit_queue(struct sock *sk) 1988{ 1989 const struct inet_connection_sock *icsk = inet_csk(sk); 1990 struct tcp_sock *tp = tcp_sk(sk); 1991 struct sk_buff *skb; 1992 struct sk_buff *hole = NULL; 1993 u32 last_lost; 1994 int mib_idx; 1995 int fwd_rexmitting = 0; 1996 1997 if (!tp->lost_out) 1998 tp->retransmit_high = tp->snd_una; 1999 2000 if (tp->retransmit_skb_hint) { 2001 skb = tp->retransmit_skb_hint; 2002 last_lost = TCP_SKB_CB(skb)->end_seq; 2003 if (after(last_lost, tp->retransmit_high)) 2004 last_lost = tp->retransmit_high; 2005 } else { 2006 skb = tcp_write_queue_head(sk); 2007 last_lost = tp->snd_una; 2008 } 2009 2010 tcp_for_write_queue_from(skb, sk) { 2011 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2012 2013 if (skb == tcp_send_head(sk)) 2014 break; 2015 /* we could do better than to assign each time */ 2016 if (hole == NULL) 2017 tp->retransmit_skb_hint = skb; 2018 2019 /* Assume this retransmit will generate 2020 * only one packet for congestion window 2021 * calculation purposes. This works because 2022 * tcp_retransmit_skb() will chop up the 2023 * packet to be MSS sized and all the 2024 * packet counting works out. 2025 */ 2026 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2027 return; 2028 2029 if (fwd_rexmitting) { 2030begin_fwd: 2031 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2032 break; 2033 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2034 2035 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2036 tp->retransmit_high = last_lost; 2037 if (!tcp_can_forward_retransmit(sk)) 2038 break; 2039 /* Backtrack if necessary to non-L'ed skb */ 2040 if (hole != NULL) { 2041 skb = hole; 2042 hole = NULL; 2043 } 2044 fwd_rexmitting = 1; 2045 goto begin_fwd; 2046 2047 } else if (!(sacked & TCPCB_LOST)) { 2048 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2049 hole = skb; 2050 continue; 2051 2052 } else { 2053 last_lost = TCP_SKB_CB(skb)->end_seq; 2054 if (icsk->icsk_ca_state != TCP_CA_Loss) 2055 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2056 else 2057 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2058 } 2059 2060 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2061 continue; 2062 2063 if (tcp_retransmit_skb(sk, skb)) 2064 return; 2065 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2066 2067 if (skb == tcp_write_queue_head(sk)) 2068 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2069 inet_csk(sk)->icsk_rto, 2070 TCP_RTO_MAX); 2071 } 2072} 2073 2074/* Send a fin. The caller locks the socket for us. This cannot be 2075 * allowed to fail queueing a FIN frame under any circumstances. 2076 */ 2077void tcp_send_fin(struct sock *sk) 2078{ 2079 struct tcp_sock *tp = tcp_sk(sk); 2080 struct sk_buff *skb = tcp_write_queue_tail(sk); 2081 int mss_now; 2082 2083 /* Optimization, tack on the FIN if we have a queue of 2084 * unsent frames. But be careful about outgoing SACKS 2085 * and IP options. 2086 */ 2087 mss_now = tcp_current_mss(sk); 2088 2089 if (tcp_send_head(sk) != NULL) { 2090 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 2091 TCP_SKB_CB(skb)->end_seq++; 2092 tp->write_seq++; 2093 } else { 2094 /* Socket is locked, keep trying until memory is available. */ 2095 for (;;) { 2096 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 2097 if (skb) 2098 break; 2099 yield(); 2100 } 2101 2102 /* Reserve space for headers and prepare control bits. */ 2103 skb_reserve(skb, MAX_TCP_HEADER); 2104 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2105 tcp_init_nondata_skb(skb, tp->write_seq, 2106 TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 2107 tcp_queue_skb(sk, skb); 2108 } 2109 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2110} 2111 2112/* We get here when a process closes a file descriptor (either due to 2113 * an explicit close() or as a byproduct of exit()'ing) and there 2114 * was unread data in the receive queue. This behavior is recommended 2115 * by RFC 2525, section 2.17. -DaveM 2116 */ 2117void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2118{ 2119 struct sk_buff *skb; 2120 2121 /* NOTE: No TCP options attached and we never retransmit this. */ 2122 skb = alloc_skb(MAX_TCP_HEADER, priority); 2123 if (!skb) { 2124 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2125 return; 2126 } 2127 2128 /* Reserve space for headers and prepare control bits. */ 2129 skb_reserve(skb, MAX_TCP_HEADER); 2130 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2131 TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 2132 /* Send it off. */ 2133 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2134 if (tcp_transmit_skb(sk, skb, 0, priority)) 2135 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2136 2137 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2138} 2139 2140/* WARNING: This routine must only be called when we have already sent 2141 * a SYN packet that crossed the incoming SYN that caused this routine 2142 * to get called. If this assumption fails then the initial rcv_wnd 2143 * and rcv_wscale values will not be correct. 2144 */ 2145int tcp_send_synack(struct sock *sk) 2146{ 2147 struct sk_buff *skb; 2148 2149 skb = tcp_write_queue_head(sk); 2150 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) { 2151 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2152 return -EFAULT; 2153 } 2154 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) { 2155 if (skb_cloned(skb)) { 2156 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2157 if (nskb == NULL) 2158 return -ENOMEM; 2159 tcp_unlink_write_queue(skb, sk); 2160 skb_header_release(nskb); 2161 __tcp_add_write_queue_head(sk, nskb); 2162 sk_wmem_free_skb(sk, skb); 2163 sk->sk_wmem_queued += nskb->truesize; 2164 sk_mem_charge(sk, nskb->truesize); 2165 skb = nskb; 2166 } 2167 2168 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 2169 TCP_ECN_send_synack(tcp_sk(sk), skb); 2170 } 2171 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2172 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2173} 2174 2175/* 2176 * Prepare a SYN-ACK. 2177 */ 2178struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2179 struct request_sock *req) 2180{ 2181 struct inet_request_sock *ireq = inet_rsk(req); 2182 struct tcp_sock *tp = tcp_sk(sk); 2183 struct tcphdr *th; 2184 int tcp_header_size; 2185 struct tcp_out_options opts; 2186 struct sk_buff *skb; 2187 struct tcp_md5sig_key *md5; 2188 __u8 *md5_hash_location; 2189 int mss; 2190 2191 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2192 if (skb == NULL) 2193 return NULL; 2194 2195 /* Reserve space for headers. */ 2196 skb_reserve(skb, MAX_TCP_HEADER); 2197 2198 skb->dst = dst_clone(dst); 2199 2200 mss = dst_metric(dst, RTAX_ADVMSS); 2201 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2202 mss = tp->rx_opt.user_mss; 2203 2204 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2205 __u8 rcv_wscale; 2206 /* Set this up on the first call only */ 2207 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2208 /* tcp_full_space because it is guaranteed to be the first packet */ 2209 tcp_select_initial_window(tcp_full_space(sk), 2210 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2211 &req->rcv_wnd, 2212 &req->window_clamp, 2213 ireq->wscale_ok, 2214 &rcv_wscale); 2215 ireq->rcv_wscale = rcv_wscale; 2216 } 2217 2218 memset(&opts, 0, sizeof(opts)); 2219#ifdef CONFIG_SYN_COOKIES 2220 if (unlikely(req->cookie_ts)) 2221 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 2222 else 2223#endif 2224 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2225 tcp_header_size = tcp_synack_options(sk, req, mss, 2226 skb, &opts, &md5) + 2227 sizeof(struct tcphdr); 2228 2229 skb_push(skb, tcp_header_size); 2230 skb_reset_transport_header(skb); 2231 2232 th = tcp_hdr(skb); 2233 memset(th, 0, sizeof(struct tcphdr)); 2234 th->syn = 1; 2235 th->ack = 1; 2236 TCP_ECN_make_synack(req, th); 2237 th->source = ireq->loc_port; 2238 th->dest = ireq->rmt_port; 2239 /* Setting of flags are superfluous here for callers (and ECE is 2240 * not even correctly set) 2241 */ 2242 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2243 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2244 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2245 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2246 2247 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2248 th->window = htons(min(req->rcv_wnd, 65535U)); 2249 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 2250 th->doff = (tcp_header_size >> 2); 2251 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 2252 2253#ifdef CONFIG_TCP_MD5SIG 2254 /* Okay, we have all we need - do the md5 hash if needed */ 2255 if (md5) { 2256 tp->af_specific->calc_md5_hash(md5_hash_location, 2257 md5, NULL, req, skb); 2258 } 2259#endif 2260 2261 return skb; 2262} 2263 2264/* 2265 * Do all connect socket setups that can be done AF independent. 2266 */ 2267static void tcp_connect_init(struct sock *sk) 2268{ 2269 struct dst_entry *dst = __sk_dst_get(sk); 2270 struct tcp_sock *tp = tcp_sk(sk); 2271 __u8 rcv_wscale; 2272 2273 /* We'll fix this up when we get a response from the other end. 2274 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2275 */ 2276 tp->tcp_header_len = sizeof(struct tcphdr) + 2277 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 2278 2279#ifdef CONFIG_TCP_MD5SIG 2280 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2281 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2282#endif 2283 2284 /* If user gave his TCP_MAXSEG, record it to clamp */ 2285 if (tp->rx_opt.user_mss) 2286 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2287 tp->max_window = 0; 2288 tcp_mtup_init(sk); 2289 tcp_sync_mss(sk, dst_mtu(dst)); 2290 2291 if (!tp->window_clamp) 2292 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2293 tp->advmss = dst_metric(dst, RTAX_ADVMSS); 2294 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2295 tp->advmss = tp->rx_opt.user_mss; 2296 2297 tcp_initialize_rcv_mss(sk); 2298 2299 tcp_select_initial_window(tcp_full_space(sk), 2300 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2301 &tp->rcv_wnd, 2302 &tp->window_clamp, 2303 sysctl_tcp_window_scaling, 2304 &rcv_wscale); 2305 2306 tp->rx_opt.rcv_wscale = rcv_wscale; 2307 tp->rcv_ssthresh = tp->rcv_wnd; 2308 2309 sk->sk_err = 0; 2310 sock_reset_flag(sk, SOCK_DONE); 2311 tp->snd_wnd = 0; 2312 tcp_init_wl(tp, 0); 2313 tp->snd_una = tp->write_seq; 2314 tp->snd_sml = tp->write_seq; 2315 tp->snd_up = tp->write_seq; 2316 tp->rcv_nxt = 0; 2317 tp->rcv_wup = 0; 2318 tp->copied_seq = 0; 2319 2320 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2321 inet_csk(sk)->icsk_retransmits = 0; 2322 tcp_clear_retrans(tp); 2323} 2324 2325/* 2326 * Build a SYN and send it off. 2327 */ 2328int tcp_connect(struct sock *sk) 2329{ 2330 struct tcp_sock *tp = tcp_sk(sk); 2331 struct sk_buff *buff; 2332 2333 tcp_connect_init(sk); 2334 2335 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2336 if (unlikely(buff == NULL)) 2337 return -ENOBUFS; 2338 2339 /* Reserve space for headers. */ 2340 skb_reserve(buff, MAX_TCP_HEADER); 2341 2342 tp->snd_nxt = tp->write_seq; 2343 tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN); 2344 TCP_ECN_send_syn(sk, buff); 2345 2346 /* Send it off. */ 2347 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2348 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2349 skb_header_release(buff); 2350 __tcp_add_write_queue_tail(sk, buff); 2351 sk->sk_wmem_queued += buff->truesize; 2352 sk_mem_charge(sk, buff->truesize); 2353 tp->packets_out += tcp_skb_pcount(buff); 2354 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2355 2356 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2357 * in order to make this packet get counted in tcpOutSegs. 2358 */ 2359 tp->snd_nxt = tp->write_seq; 2360 tp->pushed_seq = tp->write_seq; 2361 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 2362 2363 /* Timer for repeating the SYN until an answer. */ 2364 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2365 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2366 return 0; 2367} 2368 2369/* Send out a delayed ack, the caller does the policy checking 2370 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2371 * for details. 2372 */ 2373void tcp_send_delayed_ack(struct sock *sk) 2374{ 2375 struct inet_connection_sock *icsk = inet_csk(sk); 2376 int ato = icsk->icsk_ack.ato; 2377 unsigned long timeout; 2378 2379 if (ato > TCP_DELACK_MIN) { 2380 const struct tcp_sock *tp = tcp_sk(sk); 2381 int max_ato = HZ / 2; 2382 2383 if (icsk->icsk_ack.pingpong || 2384 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2385 max_ato = TCP_DELACK_MAX; 2386 2387 /* Slow path, intersegment interval is "high". */ 2388 2389 /* If some rtt estimate is known, use it to bound delayed ack. 2390 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2391 * directly. 2392 */ 2393 if (tp->srtt) { 2394 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 2395 2396 if (rtt < max_ato) 2397 max_ato = rtt; 2398 } 2399 2400 ato = min(ato, max_ato); 2401 } 2402 2403 /* Stay within the limit we were given */ 2404 timeout = jiffies + ato; 2405 2406 /* Use new timeout only if there wasn't a older one earlier. */ 2407 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 2408 /* If delack timer was blocked or is about to expire, 2409 * send ACK now. 2410 */ 2411 if (icsk->icsk_ack.blocked || 2412 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 2413 tcp_send_ack(sk); 2414 return; 2415 } 2416 2417 if (!time_before(timeout, icsk->icsk_ack.timeout)) 2418 timeout = icsk->icsk_ack.timeout; 2419 } 2420 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2421 icsk->icsk_ack.timeout = timeout; 2422 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 2423} 2424 2425/* This routine sends an ack and also updates the window. */ 2426void tcp_send_ack(struct sock *sk) 2427{ 2428 struct sk_buff *buff; 2429 2430 /* If we have been reset, we may not send again. */ 2431 if (sk->sk_state == TCP_CLOSE) 2432 return; 2433 2434 /* We are not putting this on the write queue, so 2435 * tcp_transmit_skb() will set the ownership to this 2436 * sock. 2437 */ 2438 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2439 if (buff == NULL) { 2440 inet_csk_schedule_ack(sk); 2441 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 2442 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 2443 TCP_DELACK_MAX, TCP_RTO_MAX); 2444 return; 2445 } 2446 2447 /* Reserve space for headers and prepare control bits. */ 2448 skb_reserve(buff, MAX_TCP_HEADER); 2449 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK); 2450 2451 /* Send it off, this clears delayed acks for us. */ 2452 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2453 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 2454} 2455 2456/* This routine sends a packet with an out of date sequence 2457 * number. It assumes the other end will try to ack it. 2458 * 2459 * Question: what should we make while urgent mode? 2460 * 4.4BSD forces sending single byte of data. We cannot send 2461 * out of window data, because we have SND.NXT==SND.MAX... 2462 * 2463 * Current solution: to send TWO zero-length segments in urgent mode: 2464 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 2465 * out-of-date with SND.UNA-1 to probe window. 2466 */ 2467static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 2468{ 2469 struct tcp_sock *tp = tcp_sk(sk); 2470 struct sk_buff *skb; 2471 2472 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2473 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2474 if (skb == NULL) 2475 return -1; 2476 2477 /* Reserve space for headers and set control bits. */ 2478 skb_reserve(skb, MAX_TCP_HEADER); 2479 /* Use a previous sequence. This should cause the other 2480 * end to send an ack. Don't queue or clone SKB, just 2481 * send it. 2482 */ 2483 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK); 2484 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2485 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2486} 2487 2488int tcp_write_wakeup(struct sock *sk) 2489{ 2490 struct tcp_sock *tp = tcp_sk(sk); 2491 struct sk_buff *skb; 2492 2493 if (sk->sk_state == TCP_CLOSE) 2494 return -1; 2495 2496 if ((skb = tcp_send_head(sk)) != NULL && 2497 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 2498 int err; 2499 unsigned int mss = tcp_current_mss(sk); 2500 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2501 2502 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2503 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2504 2505 /* We are probing the opening of a window 2506 * but the window size is != 0 2507 * must have been a result SWS avoidance ( sender ) 2508 */ 2509 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2510 skb->len > mss) { 2511 seg_size = min(seg_size, mss); 2512 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2513 if (tcp_fragment(sk, skb, seg_size, mss)) 2514 return -1; 2515 } else if (!tcp_skb_pcount(skb)) 2516 tcp_set_skb_tso_segs(sk, skb, mss); 2517 2518 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2519 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2520 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2521 if (!err) 2522 tcp_event_new_data_sent(sk, skb); 2523 return err; 2524 } else { 2525 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 2526 tcp_xmit_probe_skb(sk, 1); 2527 return tcp_xmit_probe_skb(sk, 0); 2528 } 2529} 2530 2531/* A window probe timeout has occurred. If window is not closed send 2532 * a partial packet else a zero probe. 2533 */ 2534void tcp_send_probe0(struct sock *sk) 2535{ 2536 struct inet_connection_sock *icsk = inet_csk(sk); 2537 struct tcp_sock *tp = tcp_sk(sk); 2538 int err; 2539 2540 err = tcp_write_wakeup(sk); 2541 2542 if (tp->packets_out || !tcp_send_head(sk)) { 2543 /* Cancel probe timer, if it is not required. */ 2544 icsk->icsk_probes_out = 0; 2545 icsk->icsk_backoff = 0; 2546 return; 2547 } 2548 2549 if (err <= 0) { 2550 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2551 icsk->icsk_backoff++; 2552 icsk->icsk_probes_out++; 2553 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2554 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2555 TCP_RTO_MAX); 2556 } else { 2557 /* If packet was not sent due to local congestion, 2558 * do not backoff and do not remember icsk_probes_out. 2559 * Let local senders to fight for local resources. 2560 * 2561 * Use accumulated backoff yet. 2562 */ 2563 if (!icsk->icsk_probes_out) 2564 icsk->icsk_probes_out = 1; 2565 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2566 min(icsk->icsk_rto << icsk->icsk_backoff, 2567 TCP_RESOURCE_PROBE_INTERVAL), 2568 TCP_RTO_MAX); 2569 } 2570} 2571 2572EXPORT_SYMBOL(tcp_select_initial_window); 2573EXPORT_SYMBOL(tcp_connect); 2574EXPORT_SYMBOL(tcp_make_synack); 2575EXPORT_SYMBOL(tcp_simple_retransmit); 2576EXPORT_SYMBOL(tcp_sync_mss); 2577EXPORT_SYMBOL(tcp_mtup_init); 2578