1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 */ 20 21/* 22 * Changes: Pedro Roque : Retransmit queue handled by TCP. 23 * : Fragmentation on mtu decrease 24 * : Segment collapse on retransmit 25 * : AF independence 26 * 27 * Linus Torvalds : send_delayed_ack 28 * David S. Miller : Charge memory using the right skb 29 * during syn/ack processing. 30 * David S. Miller : Output engine completely rewritten. 31 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. 32 * Cacophonix Gaul : draft-minshall-nagle-01 33 * J Hadi Salim : ECN support 34 * 35 */ 36 37#define pr_fmt(fmt) "TCP: " fmt 38 39#include <net/tcp.h> 40 41#include <linux/compiler.h> 42#include <linux/gfp.h> 43#include <linux/module.h> 44 45/* People can turn this off for buggy TCP's found in printers etc. */ 46int sysctl_tcp_retrans_collapse __read_mostly = 1; 47 48/* People can turn this on to work with those rare, broken TCPs that 49 * interpret the window field as a signed quantity. 50 */ 51int sysctl_tcp_workaround_signed_windows __read_mostly = 0; 52 53/* Default TSQ limit of two TSO segments */ 54int sysctl_tcp_limit_output_bytes __read_mostly = 131072; 55 56/* This limits the percentage of the congestion window which we 57 * will allow a single TSO frame to consume. Building TSO frames 58 * which are too large can cause TCP streams to be bursty. 59 */ 60int sysctl_tcp_tso_win_divisor __read_mostly = 3; 61 62int sysctl_tcp_mtu_probing __read_mostly = 0; 63int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS; 64 65/* By default, RFC2861 behavior. */ 66int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 67 68static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 69 int push_one, gfp_t gfp); 70 71/* Account for new data that has been sent to the network. */ 72static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) 73{ 74 struct inet_connection_sock *icsk = inet_csk(sk); 75 struct tcp_sock *tp = tcp_sk(sk); 76 unsigned int prior_packets = tp->packets_out; 77 78 tcp_advance_send_head(sk, skb); 79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 80 81 tp->packets_out += tcp_skb_pcount(skb); 82 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 83 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 84 tcp_rearm_rto(sk); 85 } 86} 87 88/* SND.NXT, if window was not shrunk. 89 * If window has been shrunk, what should we make? It is not clear at all. 90 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( 91 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 92 * invalid. 
OK, let's make this for now: 93 */ 94static inline __u32 tcp_acceptable_seq(const struct sock *sk) 95{ 96 const struct tcp_sock *tp = tcp_sk(sk); 97 98 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 99 return tp->snd_nxt; 100 else 101 return tcp_wnd_end(tp); 102} 103 104/* Calculate mss to advertise in SYN segment. 105 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: 106 * 107 * 1. It is independent of path mtu. 108 * 2. Ideally, it is maximal possible segment size i.e. 65535-40. 109 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of 110 * attached devices, because some buggy hosts are confused by 111 * large MSS. 112 * 4. We do not make 3, we advertise MSS, calculated from first 113 * hop device mtu, but allow to raise it to ip_rt_min_advmss. 114 * This may be overridden via information stored in routing table. 115 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, 116 * probably even Jumbo". 117 */ 118static __u16 tcp_advertise_mss(struct sock *sk) 119{ 120 struct tcp_sock *tp = tcp_sk(sk); 121 const struct dst_entry *dst = __sk_dst_get(sk); 122 int mss = tp->advmss; 123 124 if (dst) { 125 unsigned int metric = dst_metric_advmss(dst); 126 127 if (metric < mss) { 128 mss = metric; 129 tp->advmss = mss; 130 } 131 } 132 133 return (__u16)mss; 134} 135 136/* RFC2861. Reset CWND after idle period longer RTO to "restart window". 137 * This is the first part of cwnd validation mechanism. */ 138static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) 139{ 140 struct tcp_sock *tp = tcp_sk(sk); 141 s32 delta = tcp_time_stamp - tp->lsndtime; 142 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 143 u32 cwnd = tp->snd_cwnd; 144 145 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 146 147 tp->snd_ssthresh = tcp_current_ssthresh(sk); 148 restart_cwnd = min(restart_cwnd, cwnd); 149 150 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 151 cwnd >>= 1; 152 tp->snd_cwnd = max(cwnd, restart_cwnd); 153 tp->snd_cwnd_stamp = tcp_time_stamp; 154 tp->snd_cwnd_used = 0; 155} 156 157/* Congestion state accounting after a packet has been sent. */ 158static void tcp_event_data_sent(struct tcp_sock *tp, 159 struct sock *sk) 160{ 161 struct inet_connection_sock *icsk = inet_csk(sk); 162 const u32 now = tcp_time_stamp; 163 164 if (sysctl_tcp_slow_start_after_idle && 165 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) 166 tcp_cwnd_restart(sk, __sk_dst_get(sk)); 167 168 tp->lsndtime = now; 169 170 /* If it is a reply for ato after last received 171 * packet, enter pingpong mode. 172 */ 173 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 174 icsk->icsk_ack.pingpong = 1; 175} 176 177/* Account for an ACK we sent. */ 178static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 179{ 180 tcp_dec_quickack_mode(sk, pkts); 181 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 182} 183 184/* Determine a window scaling and initial window to offer. 185 * Based on the assumption that the given amount of space 186 * will be offered. Store the results in the tp structure. 187 * NOTE: for smooth operation initial space offering should 188 * be a multiple of mss if possible. We assume here that mss >= 1. 189 * This MUST be enforced by all callers. 190 */ 191void tcp_select_initial_window(int __space, __u32 mss, 192 __u32 *rcv_wnd, __u32 *window_clamp, 193 int wscale_ok, __u8 *rcv_wscale, 194 __u32 init_rcv_wnd) 195{ 196 unsigned int space = (__space < 0 ? 
0 : __space); 197 198 /* If no clamp set the clamp to the max possible scaled window */ 199 if (*window_clamp == 0) 200 (*window_clamp) = (65535 << 14); 201 space = min(*window_clamp, space); 202 203 /* Quantize space offering to a multiple of mss if possible. */ 204 if (space > mss) 205 space = (space / mss) * mss; 206 207 /* NOTE: offering an initial window larger than 32767 208 * will break some buggy TCP stacks. If the admin tells us 209 * it is likely we could be speaking with such a buggy stack 210 * we will truncate our initial window offering to 32K-1 211 * unless the remote has sent us a window scaling option, 212 * which we interpret as a sign the remote TCP is not 213 * misinterpreting the window field as a signed quantity. 214 */ 215 if (sysctl_tcp_workaround_signed_windows) 216 (*rcv_wnd) = min(space, MAX_TCP_WINDOW); 217 else 218 (*rcv_wnd) = space; 219 220 (*rcv_wscale) = 0; 221 if (wscale_ok) { 222 /* Set window scaling on max possible window 223 * See RFC1323 for an explanation of the limit to 14 224 */ 225 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 226 space = min_t(u32, space, *window_clamp); 227 while (space > 65535 && (*rcv_wscale) < 14) { 228 space >>= 1; 229 (*rcv_wscale)++; 230 } 231 } 232 233 /* Set initial window to a value enough for senders starting with 234 * initial congestion window of sysctl_tcp_default_init_rwnd. Place 235 * a limit on the initial window when mss is larger than 1460. 236 */ 237 if (mss > (1 << *rcv_wscale)) { 238 int init_cwnd = sysctl_tcp_default_init_rwnd; 239 if (mss > 1460) 240 init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2); 241 /* when initializing use the value from init_rcv_wnd 242 * rather than the default from above 243 */ 244 if (init_rcv_wnd) 245 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); 246 else 247 *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); 248 } 249 250 /* Set the clamp no higher than max representable value */ 251 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); 252} 253EXPORT_SYMBOL(tcp_select_initial_window); 254 255/* Chose a new window to advertise, update state in tcp_sock for the 256 * socket, and return result with RFC1323 scaling applied. The return 257 * value can be stuffed directly into th->window for an outgoing 258 * frame. 259 */ 260static u16 tcp_select_window(struct sock *sk) 261{ 262 struct tcp_sock *tp = tcp_sk(sk); 263 u32 cur_win = tcp_receive_window(tp); 264 u32 new_win = __tcp_select_window(sk); 265 266 /* Never shrink the offered window */ 267 if (new_win < cur_win) { 268 /* Danger Will Robinson! 269 * Don't update rcv_wup/rcv_wnd here or else 270 * we will not be able to advertise a zero 271 * window in time. --DaveM 272 * 273 * Relax Will Robinson. 274 */ 275 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); 276 } 277 tp->rcv_wnd = new_win; 278 tp->rcv_wup = tp->rcv_nxt; 279 280 /* Make sure we do not exceed the maximum possible 281 * scaled window. 282 */ 283 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) 284 new_win = min(new_win, MAX_TCP_WINDOW); 285 else 286 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); 287 288 /* RFC1323 scaling applied */ 289 new_win >>= tp->rx_opt.rcv_wscale; 290 291 /* If we advertise zero window, disable fast path. 
*/ 292 if (new_win == 0) 293 tp->pred_flags = 0; 294 295 return new_win; 296} 297 298/* Packet ECN state for a SYN-ACK */ 299static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) 300{ 301 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; 302 if (!(tp->ecn_flags & TCP_ECN_OK)) 303 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; 304} 305 306/* Packet ECN state for a SYN. */ 307static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) 308{ 309 struct tcp_sock *tp = tcp_sk(sk); 310 311 tp->ecn_flags = 0; 312 if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) { 313 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; 314 tp->ecn_flags = TCP_ECN_OK; 315 } 316} 317 318static __inline__ void 319TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) 320{ 321 if (inet_rsk(req)->ecn_ok) 322 th->ece = 1; 323} 324 325/* Set up ECN state for a packet on a ESTABLISHED socket that is about to 326 * be sent. 327 */ 328static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, 329 int tcp_header_len) 330{ 331 struct tcp_sock *tp = tcp_sk(sk); 332 333 if (tp->ecn_flags & TCP_ECN_OK) { 334 /* Not-retransmitted data segment: set ECT and inject CWR. */ 335 if (skb->len != tcp_header_len && 336 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { 337 INET_ECN_xmit(sk); 338 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { 339 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 340 tcp_hdr(skb)->cwr = 1; 341 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 342 } 343 } else { 344 /* ACK or retransmitted segment: clear ECT|CE */ 345 INET_ECN_dontxmit(sk); 346 } 347 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) 348 tcp_hdr(skb)->ece = 1; 349 } 350} 351 352/* Constructs common control bits of non-data skb. If SYN/FIN is present, 353 * auto increment end seqno. 354 */ 355static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 356{ 357 skb->ip_summed = CHECKSUM_PARTIAL; 358 skb->csum = 0; 359 360 TCP_SKB_CB(skb)->tcp_flags = flags; 361 TCP_SKB_CB(skb)->sacked = 0; 362 363 skb_shinfo(skb)->gso_segs = 1; 364 skb_shinfo(skb)->gso_size = 0; 365 skb_shinfo(skb)->gso_type = 0; 366 367 TCP_SKB_CB(skb)->seq = seq; 368 if (flags & (TCPHDR_SYN | TCPHDR_FIN)) 369 seq++; 370 TCP_SKB_CB(skb)->end_seq = seq; 371} 372 373static inline bool tcp_urg_mode(const struct tcp_sock *tp) 374{ 375 return tp->snd_una != tp->snd_up; 376} 377 378#define OPTION_SACK_ADVERTISE (1 << 0) 379#define OPTION_TS (1 << 1) 380#define OPTION_MD5 (1 << 2) 381#define OPTION_WSCALE (1 << 3) 382#define OPTION_FAST_OPEN_COOKIE (1 << 8) 383 384struct tcp_out_options { 385 u16 options; /* bit field of OPTION_* */ 386 u16 mss; /* 0 to disable */ 387 u8 ws; /* window scale, 0 to disable */ 388 u8 num_sack_blocks; /* number of SACK blocks to include */ 389 u8 hash_size; /* bytes in hash_location */ 390 __u8 *hash_location; /* temporary pointer, overloaded */ 391 __u32 tsval, tsecr; /* need to include OPTION_TS */ 392 struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ 393}; 394 395/* Write previously computed TCP options to the packet. 396 * 397 * Beware: Something in the Internet is very sensitive to the ordering of 398 * TCP options, we learned this through the hard way, so be careful here. 
399 * Luckily we can at least blame others for their non-compliance but from 400 * inter-operatibility perspective it seems that we're somewhat stuck with 401 * the ordering which we have been using if we want to keep working with 402 * those broken things (not that it currently hurts anybody as there isn't 403 * particular reason why the ordering would need to be changed). 404 * 405 * At least SACK_PERM as the first option is known to lead to a disaster 406 * (but it may well be that other scenarios fail similarly). 407 */ 408static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 409 struct tcp_out_options *opts) 410{ 411 u16 options = opts->options; /* mungable copy */ 412 413 if (unlikely(OPTION_MD5 & options)) { 414 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 415 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); 416 /* overload cookie hash location */ 417 opts->hash_location = (__u8 *)ptr; 418 ptr += 4; 419 } 420 421 if (unlikely(opts->mss)) { 422 *ptr++ = htonl((TCPOPT_MSS << 24) | 423 (TCPOLEN_MSS << 16) | 424 opts->mss); 425 } 426 427 if (likely(OPTION_TS & options)) { 428 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 429 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 430 (TCPOLEN_SACK_PERM << 16) | 431 (TCPOPT_TIMESTAMP << 8) | 432 TCPOLEN_TIMESTAMP); 433 options &= ~OPTION_SACK_ADVERTISE; 434 } else { 435 *ptr++ = htonl((TCPOPT_NOP << 24) | 436 (TCPOPT_NOP << 16) | 437 (TCPOPT_TIMESTAMP << 8) | 438 TCPOLEN_TIMESTAMP); 439 } 440 *ptr++ = htonl(opts->tsval); 441 *ptr++ = htonl(opts->tsecr); 442 } 443 444 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 445 *ptr++ = htonl((TCPOPT_NOP << 24) | 446 (TCPOPT_NOP << 16) | 447 (TCPOPT_SACK_PERM << 8) | 448 TCPOLEN_SACK_PERM); 449 } 450 451 if (unlikely(OPTION_WSCALE & options)) { 452 *ptr++ = htonl((TCPOPT_NOP << 24) | 453 (TCPOPT_WINDOW << 16) | 454 (TCPOLEN_WINDOW << 8) | 455 opts->ws); 456 } 457 458 if (unlikely(opts->num_sack_blocks)) { 459 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 460 tp->duplicate_sack : tp->selective_acks; 461 int this_sack; 462 463 *ptr++ = htonl((TCPOPT_NOP << 24) | 464 (TCPOPT_NOP << 16) | 465 (TCPOPT_SACK << 8) | 466 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 467 TCPOLEN_SACK_PERBLOCK))); 468 469 for (this_sack = 0; this_sack < opts->num_sack_blocks; 470 ++this_sack) { 471 *ptr++ = htonl(sp[this_sack].start_seq); 472 *ptr++ = htonl(sp[this_sack].end_seq); 473 } 474 475 tp->rx_opt.dsack = 0; 476 } 477 478 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { 479 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; 480 481 *ptr++ = htonl((TCPOPT_EXP << 24) | 482 ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) | 483 TCPOPT_FASTOPEN_MAGIC); 484 485 memcpy(ptr, foc->val, foc->len); 486 if ((foc->len & 3) == 2) { 487 u8 *align = ((u8 *)ptr) + foc->len; 488 align[0] = align[1] = TCPOPT_NOP; 489 } 490 ptr += (foc->len + 3) >> 2; 491 } 492} 493 494/* Compute TCP options for SYN packets. This is not the final 495 * network wire format yet. 496 */ 497static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, 498 struct tcp_out_options *opts, 499 struct tcp_md5sig_key **md5) 500{ 501 struct tcp_sock *tp = tcp_sk(sk); 502 unsigned int remaining = MAX_TCP_OPTION_SPACE; 503 struct tcp_fastopen_request *fastopen = tp->fastopen_req; 504 505#ifdef CONFIG_TCP_MD5SIG 506 *md5 = tp->af_specific->md5_lookup(sk, sk); 507 if (*md5) { 508 opts->options |= OPTION_MD5; 509 remaining -= TCPOLEN_MD5SIG_ALIGNED; 510 } 511#else 512 *md5 = NULL; 513#endif 514 515 /* We always get an MSS option. 
The option bytes which will be seen in 516 * normal data packets should timestamps be used, must be in the MSS 517 * advertised. But we subtract them from tp->mss_cache so that 518 * calculations in tcp_sendmsg are simpler etc. So account for this 519 * fact here if necessary. If we don't do this correctly, as a 520 * receiver we won't recognize data packets as being full sized when we 521 * should, and thus we won't abide by the delayed ACK rules correctly. 522 * SACKs don't matter, we never delay an ACK when we have any of those 523 * going out. */ 524 opts->mss = tcp_advertise_mss(sk); 525 remaining -= TCPOLEN_MSS_ALIGNED; 526 527 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 528 opts->options |= OPTION_TS; 529 opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; 530 opts->tsecr = tp->rx_opt.ts_recent; 531 remaining -= TCPOLEN_TSTAMP_ALIGNED; 532 } 533 if (likely(sysctl_tcp_window_scaling)) { 534 opts->ws = tp->rx_opt.rcv_wscale; 535 opts->options |= OPTION_WSCALE; 536 remaining -= TCPOLEN_WSCALE_ALIGNED; 537 } 538 if (likely(sysctl_tcp_sack)) { 539 opts->options |= OPTION_SACK_ADVERTISE; 540 if (unlikely(!(OPTION_TS & opts->options))) 541 remaining -= TCPOLEN_SACKPERM_ALIGNED; 542 } 543 544 if (fastopen && fastopen->cookie.len >= 0) { 545 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len; 546 need = (need + 3) & ~3U; /* Align to 32 bits */ 547 if (remaining >= need) { 548 opts->options |= OPTION_FAST_OPEN_COOKIE; 549 opts->fastopen_cookie = &fastopen->cookie; 550 remaining -= need; 551 tp->syn_fastopen = 1; 552 } 553 } 554 555 return MAX_TCP_OPTION_SPACE - remaining; 556} 557 558/* Set up TCP options for SYN-ACKs. */ 559static unsigned int tcp_synack_options(struct sock *sk, 560 struct request_sock *req, 561 unsigned int mss, struct sk_buff *skb, 562 struct tcp_out_options *opts, 563 struct tcp_md5sig_key **md5, 564 struct tcp_fastopen_cookie *foc) 565{ 566 struct inet_request_sock *ireq = inet_rsk(req); 567 unsigned int remaining = MAX_TCP_OPTION_SPACE; 568 569#ifdef CONFIG_TCP_MD5SIG 570 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 571 if (*md5) { 572 opts->options |= OPTION_MD5; 573 remaining -= TCPOLEN_MD5SIG_ALIGNED; 574 575 /* We can't fit any SACK blocks in a packet with MD5 + TS 576 * options. There was discussion about disabling SACK 577 * rather than TS in order to fit in better with old, 578 * buggy kernels, but that was deemed to be unnecessary. 579 */ 580 ireq->tstamp_ok &= !ireq->sack_ok; 581 } 582#else 583 *md5 = NULL; 584#endif 585 586 /* We always send an MSS option. */ 587 opts->mss = mss; 588 remaining -= TCPOLEN_MSS_ALIGNED; 589 590 if (likely(ireq->wscale_ok)) { 591 opts->ws = ireq->rcv_wscale; 592 opts->options |= OPTION_WSCALE; 593 remaining -= TCPOLEN_WSCALE_ALIGNED; 594 } 595 if (likely(ireq->tstamp_ok)) { 596 opts->options |= OPTION_TS; 597 opts->tsval = TCP_SKB_CB(skb)->when; 598 opts->tsecr = req->ts_recent; 599 remaining -= TCPOLEN_TSTAMP_ALIGNED; 600 } 601 if (likely(ireq->sack_ok)) { 602 opts->options |= OPTION_SACK_ADVERTISE; 603 if (unlikely(!ireq->tstamp_ok)) 604 remaining -= TCPOLEN_SACKPERM_ALIGNED; 605 } 606 if (foc != NULL) { 607 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; 608 need = (need + 3) & ~3U; /* Align to 32 bits */ 609 if (remaining >= need) { 610 opts->options |= OPTION_FAST_OPEN_COOKIE; 611 opts->fastopen_cookie = foc; 612 remaining -= need; 613 } 614 } 615 616 return MAX_TCP_OPTION_SPACE - remaining; 617} 618 619/* Compute TCP options for ESTABLISHED sockets. This is not the 620 * final wire format yet. 
621 */ 622static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, 623 struct tcp_out_options *opts, 624 struct tcp_md5sig_key **md5) 625{ 626 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 627 struct tcp_sock *tp = tcp_sk(sk); 628 unsigned int size = 0; 629 unsigned int eff_sacks; 630 631#ifdef CONFIG_TCP_MD5SIG 632 *md5 = tp->af_specific->md5_lookup(sk, sk); 633 if (unlikely(*md5)) { 634 opts->options |= OPTION_MD5; 635 size += TCPOLEN_MD5SIG_ALIGNED; 636 } 637#else 638 *md5 = NULL; 639#endif 640 641 if (likely(tp->rx_opt.tstamp_ok)) { 642 opts->options |= OPTION_TS; 643 opts->tsval = tcb ? tcb->when + tp->tsoffset : 0; 644 opts->tsecr = tp->rx_opt.ts_recent; 645 size += TCPOLEN_TSTAMP_ALIGNED; 646 } 647 648 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 649 if (unlikely(eff_sacks)) { 650 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 651 opts->num_sack_blocks = 652 min_t(unsigned int, eff_sacks, 653 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 654 TCPOLEN_SACK_PERBLOCK); 655 size += TCPOLEN_SACK_BASE_ALIGNED + 656 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 657 } 658 659 return size; 660} 661 662 663/* TCP SMALL QUEUES (TSQ) 664 * 665 * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev) 666 * to reduce RTT and bufferbloat. 667 * We do this using a special skb destructor (tcp_wfree). 668 * 669 * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb 670 * needs to be reallocated in a driver. 671 * The invariant being skb->truesize substracted from sk->sk_wmem_alloc 672 * 673 * Since transmit from skb destructor is forbidden, we use a tasklet 674 * to process all sockets that eventually need to send more skbs. 675 * We use one tasklet per cpu, with its own queue of sockets. 676 */ 677struct tsq_tasklet { 678 struct tasklet_struct tasklet; 679 struct list_head head; /* queue of tcp sockets */ 680}; 681static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet); 682 683static void tcp_tsq_handler(struct sock *sk) 684{ 685 if ((1 << sk->sk_state) & 686 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | 687 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) 688 tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); 689} 690/* 691 * One tasklest per cpu tries to send more skbs. 
692 * We run in tasklet context but need to disable irqs when 693 * transfering tsq->head because tcp_wfree() might 694 * interrupt us (non NAPI drivers) 695 */ 696static void tcp_tasklet_func(unsigned long data) 697{ 698 struct tsq_tasklet *tsq = (struct tsq_tasklet *)data; 699 LIST_HEAD(list); 700 unsigned long flags; 701 struct list_head *q, *n; 702 struct tcp_sock *tp; 703 struct sock *sk; 704 705 local_irq_save(flags); 706 list_splice_init(&tsq->head, &list); 707 local_irq_restore(flags); 708 709 list_for_each_safe(q, n, &list) { 710 tp = list_entry(q, struct tcp_sock, tsq_node); 711 list_del(&tp->tsq_node); 712 713 sk = (struct sock *)tp; 714 bh_lock_sock(sk); 715 716 if (!sock_owned_by_user(sk)) { 717 tcp_tsq_handler(sk); 718 } else { 719 /* defer the work to tcp_release_cb() */ 720 set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); 721 } 722 bh_unlock_sock(sk); 723 724 clear_bit(TSQ_QUEUED, &tp->tsq_flags); 725 sk_free(sk); 726 } 727} 728 729#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \ 730 (1UL << TCP_WRITE_TIMER_DEFERRED) | \ 731 (1UL << TCP_DELACK_TIMER_DEFERRED) | \ 732 (1UL << TCP_MTU_REDUCED_DEFERRED)) 733/** 734 * tcp_release_cb - tcp release_sock() callback 735 * @sk: socket 736 * 737 * called from release_sock() to perform protocol dependent 738 * actions before socket release. 739 */ 740void tcp_release_cb(struct sock *sk) 741{ 742 struct tcp_sock *tp = tcp_sk(sk); 743 unsigned long flags, nflags; 744 745 /* perform an atomic operation only if at least one flag is set */ 746 do { 747 flags = tp->tsq_flags; 748 if (!(flags & TCP_DEFERRED_ALL)) 749 return; 750 nflags = flags & ~TCP_DEFERRED_ALL; 751 } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); 752 753 if (flags & (1UL << TCP_TSQ_DEFERRED)) 754 tcp_tsq_handler(sk); 755 756 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { 757 tcp_write_timer_handler(sk); 758 __sock_put(sk); 759 } 760 if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { 761 tcp_delack_timer_handler(sk); 762 __sock_put(sk); 763 } 764 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { 765 sk->sk_prot->mtu_reduced(sk); 766 __sock_put(sk); 767 } 768} 769EXPORT_SYMBOL(tcp_release_cb); 770 771void __init tcp_tasklet_init(void) 772{ 773 int i; 774 775 for_each_possible_cpu(i) { 776 struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); 777 778 INIT_LIST_HEAD(&tsq->head); 779 tasklet_init(&tsq->tasklet, 780 tcp_tasklet_func, 781 (unsigned long)tsq); 782 } 783} 784 785/* 786 * Write buffer destructor automatically called from kfree_skb. 787 * We cant xmit new skbs from this context, as we might already 788 * hold qdisc lock. 789 */ 790void tcp_wfree(struct sk_buff *skb) 791{ 792 struct sock *sk = skb->sk; 793 struct tcp_sock *tp = tcp_sk(sk); 794 795 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && 796 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { 797 unsigned long flags; 798 struct tsq_tasklet *tsq; 799 800 /* Keep a ref on socket. 801 * This last ref will be released in tcp_tasklet_func() 802 */ 803 atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc); 804 805 /* queue this socket to tasklet queue */ 806 local_irq_save(flags); 807 tsq = &__get_cpu_var(tsq_tasklet); 808 list_add(&tp->tsq_node, &tsq->head); 809 tasklet_schedule(&tsq->tasklet); 810 local_irq_restore(flags); 811 } else { 812 sock_wfree(skb); 813 } 814} 815 816/* This routine actually transmits TCP packets queued in by 817 * tcp_do_sendmsg(). This is used by both the initial 818 * transmission and possible later retransmissions. 819 * All SKB's seen here are completely headerless. 
It is our 820 * job to build the TCP header, and pass the packet down to 821 * IP so it can do the same plus pass the packet off to the 822 * device. 823 * 824 * We are working here with either a clone of the original 825 * SKB, or a fresh unique copy made by the retransmit engine. 826 */ 827static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 828 gfp_t gfp_mask) 829{ 830 const struct inet_connection_sock *icsk = inet_csk(sk); 831 struct inet_sock *inet; 832 struct tcp_sock *tp; 833 struct tcp_skb_cb *tcb; 834 struct tcp_out_options opts; 835 unsigned int tcp_options_size, tcp_header_size; 836 struct tcp_md5sig_key *md5; 837 struct tcphdr *th; 838 int err; 839 840 BUG_ON(!skb || !tcp_skb_pcount(skb)); 841 842 /* If congestion control is doing timestamping, we must 843 * take such a timestamp before we potentially clone/copy. 844 */ 845 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) 846 __net_timestamp(skb); 847 848 if (likely(clone_it)) { 849 const struct sk_buff *fclone = skb + 1; 850 851 if (unlikely(skb->fclone == SKB_FCLONE_ORIG && 852 fclone->fclone == SKB_FCLONE_CLONE)) 853 NET_INC_STATS_BH(sock_net(sk), 854 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 855 856 if (unlikely(skb_cloned(skb))) 857 skb = pskb_copy(skb, gfp_mask); 858 else 859 skb = skb_clone(skb, gfp_mask); 860 if (unlikely(!skb)) 861 return -ENOBUFS; 862 } 863 864 inet = inet_sk(sk); 865 tp = tcp_sk(sk); 866 tcb = TCP_SKB_CB(skb); 867 memset(&opts, 0, sizeof(opts)); 868 869 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) 870 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 871 else 872 tcp_options_size = tcp_established_options(sk, skb, &opts, 873 &md5); 874 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 875 876 if (tcp_packets_in_flight(tp) == 0) 877 tcp_ca_event(sk, CA_EVENT_TX_START); 878 879 /* if no packet is in qdisc/device queue, then allow XPS to select 880 * another queue. 881 */ 882 skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; 883 884 skb_push(skb, tcp_header_size); 885 skb_reset_transport_header(skb); 886 887 skb_orphan(skb); 888 skb->sk = sk; 889 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? 890 tcp_wfree : sock_wfree; 891 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 892 893 /* Build TCP header and checksum it. */ 894 th = tcp_hdr(skb); 895 th->source = inet->inet_sport; 896 th->dest = inet->inet_dport; 897 th->seq = htonl(tcb->seq); 898 th->ack_seq = htonl(tp->rcv_nxt); 899 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 900 tcb->tcp_flags); 901 902 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { 903 /* RFC1323: The window in SYN & SYN/ACK segments 904 * is never scaled. 
905 */ 906 th->window = htons(min(tp->rcv_wnd, 65535U)); 907 } else { 908 th->window = htons(tcp_select_window(sk)); 909 } 910 th->check = 0; 911 th->urg_ptr = 0; 912 913 /* The urg_mode check is necessary during a below snd_una win probe */ 914 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 915 if (before(tp->snd_up, tcb->seq + 0x10000)) { 916 th->urg_ptr = htons(tp->snd_up - tcb->seq); 917 th->urg = 1; 918 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 919 th->urg_ptr = htons(0xFFFF); 920 th->urg = 1; 921 } 922 } 923 924 tcp_options_write((__be32 *)(th + 1), tp, &opts); 925 if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) 926 TCP_ECN_send(sk, skb, tcp_header_size); 927 928#ifdef CONFIG_TCP_MD5SIG 929 /* Calculate the MD5 hash, as we have all we need now */ 930 if (md5) { 931 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 932 tp->af_specific->calc_md5_hash(opts.hash_location, 933 md5, sk, NULL, skb); 934 } 935#endif 936 937 icsk->icsk_af_ops->send_check(sk, skb); 938 939 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 940 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 941 942 if (skb->len != tcp_header_size) 943 tcp_event_data_sent(tp, sk); 944 945 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 946 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 947 tcp_skb_pcount(skb)); 948 949 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); 950 if (likely(err <= 0)) 951 return err; 952 953 tcp_enter_cwr(sk, 1); 954 955 return net_xmit_eval(err); 956} 957 958/* This routine just queues the buffer for sending. 959 * 960 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 961 * otherwise socket can stall. 962 */ 963static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 964{ 965 struct tcp_sock *tp = tcp_sk(sk); 966 967 /* Advance write_seq and place onto the write_queue. */ 968 tp->write_seq = TCP_SKB_CB(skb)->end_seq; 969 skb_header_release(skb); 970 tcp_add_write_queue_tail(sk, skb); 971 sk->sk_wmem_queued += skb->truesize; 972 sk_mem_charge(sk, skb->truesize); 973} 974 975/* Initialize TSO segments for a packet. */ 976static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 977 unsigned int mss_now) 978{ 979 if (skb->len <= mss_now || !sk_can_gso(sk) || 980 skb->ip_summed == CHECKSUM_NONE) { 981 /* Avoid the costly divide in the normal 982 * non-TSO case. 983 */ 984 skb_shinfo(skb)->gso_segs = 1; 985 skb_shinfo(skb)->gso_size = 0; 986 skb_shinfo(skb)->gso_type = 0; 987 } else { 988 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); 989 skb_shinfo(skb)->gso_size = mss_now; 990 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 991 } 992} 993 994/* When a modification to fackets out becomes necessary, we need to check 995 * skb is counted to fackets_out or not. 
996 */ 997static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, 998 int decr) 999{ 1000 struct tcp_sock *tp = tcp_sk(sk); 1001 1002 if (!tp->sacked_out || tcp_is_reno(tp)) 1003 return; 1004 1005 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 1006 tp->fackets_out -= decr; 1007} 1008 1009/* Pcount in the middle of the write queue got changed, we need to do various 1010 * tweaks to fix counters 1011 */ 1012static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 1013{ 1014 struct tcp_sock *tp = tcp_sk(sk); 1015 1016 tp->packets_out -= decr; 1017 1018 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1019 tp->sacked_out -= decr; 1020 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1021 tp->retrans_out -= decr; 1022 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 1023 tp->lost_out -= decr; 1024 1025 /* Reno case is special. Sigh... */ 1026 if (tcp_is_reno(tp) && decr > 0) 1027 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 1028 1029 tcp_adjust_fackets_out(sk, skb, decr); 1030 1031 if (tp->lost_skb_hint && 1032 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 1033 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) 1034 tp->lost_cnt_hint -= decr; 1035 1036 tcp_verify_left_out(tp); 1037} 1038 1039/* Function to create two new TCP segments. Shrinks the given segment 1040 * to the specified size and appends a new segment with the rest of the 1041 * packet to the list. This won't be called frequently, I hope. 1042 * Remember, these are still headerless SKBs at this point. 1043 */ 1044int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 1045 unsigned int mss_now) 1046{ 1047 struct tcp_sock *tp = tcp_sk(sk); 1048 struct sk_buff *buff; 1049 int nsize, old_factor; 1050 int nlen; 1051 u8 flags; 1052 1053 if (WARN_ON(len > skb->len)) 1054 return -EINVAL; 1055 1056 nsize = skb_headlen(skb) - len; 1057 if (nsize < 0) 1058 nsize = 0; 1059 1060 if (skb_cloned(skb) && 1061 skb_is_nonlinear(skb) && 1062 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1063 return -ENOMEM; 1064 1065 /* Get a new skb... force flag on. */ 1066 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 1067 if (buff == NULL) 1068 return -ENOMEM; /* We'll just try again later. */ 1069 1070 sk->sk_wmem_queued += buff->truesize; 1071 sk_mem_charge(sk, buff->truesize); 1072 nlen = skb->len - len - nsize; 1073 buff->truesize += nlen; 1074 skb->truesize -= nlen; 1075 1076 /* Correct the sequence numbers. */ 1077 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1078 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1079 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1080 1081 /* PSH and FIN should only be set in the second packet. */ 1082 flags = TCP_SKB_CB(skb)->tcp_flags; 1083 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1084 TCP_SKB_CB(buff)->tcp_flags = flags; 1085 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1086 1087 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 1088 /* Copy and checksum data tail into the new buffer. */ 1089 buff->csum = csum_partial_copy_nocheck(skb->data + len, 1090 skb_put(buff, nsize), 1091 nsize, 0); 1092 1093 skb_trim(skb, len); 1094 1095 skb->csum = csum_block_sub(skb->csum, buff->csum, len); 1096 } else { 1097 skb->ip_summed = CHECKSUM_PARTIAL; 1098 skb_split(skb, buff, len); 1099 } 1100 1101 buff->ip_summed = skb->ip_summed; 1102 1103 /* Looks stupid, but our code really uses when of 1104 * skbs, which it never sent before. 
--ANK 1105 */ 1106 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 1107 buff->tstamp = skb->tstamp; 1108 1109 old_factor = tcp_skb_pcount(skb); 1110 1111 /* Fix up tso_factor for both original and new SKB. */ 1112 tcp_set_skb_tso_segs(sk, skb, mss_now); 1113 tcp_set_skb_tso_segs(sk, buff, mss_now); 1114 1115 /* If this packet has been sent out already, we must 1116 * adjust the various packet counters. 1117 */ 1118 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 1119 int diff = old_factor - tcp_skb_pcount(skb) - 1120 tcp_skb_pcount(buff); 1121 1122 if (diff) 1123 tcp_adjust_pcount(sk, skb, diff); 1124 } 1125 1126 /* Link BUFF into the send queue. */ 1127 skb_header_release(buff); 1128 tcp_insert_write_queue_after(skb, buff, sk); 1129 1130 return 0; 1131} 1132 1133/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c 1134 * eventually). The difference is that pulled data not copied, but 1135 * immediately discarded. 1136 */ 1137static void __pskb_trim_head(struct sk_buff *skb, int len) 1138{ 1139 int i, k, eat; 1140 1141 eat = min_t(int, len, skb_headlen(skb)); 1142 if (eat) { 1143 __skb_pull(skb, eat); 1144 len -= eat; 1145 if (!len) 1146 return; 1147 } 1148 eat = len; 1149 k = 0; 1150 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1151 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1152 1153 if (size <= eat) { 1154 skb_frag_unref(skb, i); 1155 eat -= size; 1156 } else { 1157 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1158 if (eat) { 1159 skb_shinfo(skb)->frags[k].page_offset += eat; 1160 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 1161 eat = 0; 1162 } 1163 k++; 1164 } 1165 } 1166 skb_shinfo(skb)->nr_frags = k; 1167 1168 skb_reset_tail_pointer(skb); 1169 skb->data_len -= len; 1170 skb->len = skb->data_len; 1171} 1172 1173/* Remove acked data from a packet in the transmit queue. */ 1174int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 1175{ 1176 if (skb_unclone(skb, GFP_ATOMIC)) 1177 return -ENOMEM; 1178 1179 __pskb_trim_head(skb, len); 1180 1181 TCP_SKB_CB(skb)->seq += len; 1182 skb->ip_summed = CHECKSUM_PARTIAL; 1183 1184 skb->truesize -= len; 1185 sk->sk_wmem_queued -= len; 1186 sk_mem_uncharge(sk, len); 1187 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1188 1189 /* Any change of skb->len requires recalculation of tso factor. */ 1190 if (tcp_skb_pcount(skb) > 1) 1191 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); 1192 1193 return 0; 1194} 1195 1196/* Calculate MSS not accounting any TCP options. 
*/ 1197static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 1198{ 1199 const struct tcp_sock *tp = tcp_sk(sk); 1200 const struct inet_connection_sock *icsk = inet_csk(sk); 1201 int mss_now; 1202 1203 /* Calculate base mss without TCP options: 1204 It is MMS_S - sizeof(tcphdr) of rfc1122 1205 */ 1206 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 1207 1208 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 1209 if (icsk->icsk_af_ops->net_frag_header_len) { 1210 const struct dst_entry *dst = __sk_dst_get(sk); 1211 1212 if (dst && dst_allfrag(dst)) 1213 mss_now -= icsk->icsk_af_ops->net_frag_header_len; 1214 } 1215 1216 /* Clamp it (mss_clamp does not include tcp options) */ 1217 if (mss_now > tp->rx_opt.mss_clamp) 1218 mss_now = tp->rx_opt.mss_clamp; 1219 1220 /* Now subtract optional transport overhead */ 1221 mss_now -= icsk->icsk_ext_hdr_len; 1222 1223 /* Then reserve room for full set of TCP options and 8 bytes of data */ 1224 if (mss_now < 48) 1225 mss_now = 48; 1226 return mss_now; 1227} 1228 1229/* Calculate MSS. Not accounting for SACKs here. */ 1230int tcp_mtu_to_mss(struct sock *sk, int pmtu) 1231{ 1232 /* Subtract TCP options size, not including SACKs */ 1233 return __tcp_mtu_to_mss(sk, pmtu) - 1234 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 1235} 1236 1237/* Inverse of above */ 1238int tcp_mss_to_mtu(struct sock *sk, int mss) 1239{ 1240 const struct tcp_sock *tp = tcp_sk(sk); 1241 const struct inet_connection_sock *icsk = inet_csk(sk); 1242 int mtu; 1243 1244 mtu = mss + 1245 tp->tcp_header_len + 1246 icsk->icsk_ext_hdr_len + 1247 icsk->icsk_af_ops->net_header_len; 1248 1249 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 1250 if (icsk->icsk_af_ops->net_frag_header_len) { 1251 const struct dst_entry *dst = __sk_dst_get(sk); 1252 1253 if (dst && dst_allfrag(dst)) 1254 mtu += icsk->icsk_af_ops->net_frag_header_len; 1255 } 1256 return mtu; 1257} 1258 1259/* MTU probing init per socket */ 1260void tcp_mtup_init(struct sock *sk) 1261{ 1262 struct tcp_sock *tp = tcp_sk(sk); 1263 struct inet_connection_sock *icsk = inet_csk(sk); 1264 1265 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 1266 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 1267 icsk->icsk_af_ops->net_header_len; 1268 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 1269 icsk->icsk_mtup.probe_size = 0; 1270} 1271EXPORT_SYMBOL(tcp_mtup_init); 1272 1273/* This function synchronize snd mss to current pmtu/exthdr set. 1274 1275 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts 1276 for TCP options, but includes only bare TCP header. 1277 1278 tp->rx_opt.mss_clamp is mss negotiated at connection setup. 1279 It is minimum of user_mss and mss received with SYN. 1280 It also does not include TCP options. 1281 1282 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 1283 1284 tp->mss_cache is current effective sending mss, including 1285 all tcp options except for SACKs. It is evaluated, 1286 taking into account current pmtu, but never exceeds 1287 tp->rx_opt.mss_clamp. 1288 1289 NOTE1. rfc1122 clearly states that advertised MSS 1290 DOES NOT include either tcp or ip options. 1291 1292 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1293 are READ ONLY outside this function. 
--ANK (980731) 1294 */ 1295unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 1296{ 1297 struct tcp_sock *tp = tcp_sk(sk); 1298 struct inet_connection_sock *icsk = inet_csk(sk); 1299 int mss_now; 1300 1301 if (icsk->icsk_mtup.search_high > pmtu) 1302 icsk->icsk_mtup.search_high = pmtu; 1303 1304 mss_now = tcp_mtu_to_mss(sk, pmtu); 1305 mss_now = tcp_bound_to_half_wnd(tp, mss_now); 1306 1307 /* And store cached results */ 1308 icsk->icsk_pmtu_cookie = pmtu; 1309 if (icsk->icsk_mtup.enabled) 1310 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1311 tp->mss_cache = mss_now; 1312 1313 return mss_now; 1314} 1315EXPORT_SYMBOL(tcp_sync_mss); 1316 1317/* Compute the current effective MSS, taking SACKs and IP options, 1318 * and even PMTU discovery events into account. 1319 */ 1320unsigned int tcp_current_mss(struct sock *sk) 1321{ 1322 const struct tcp_sock *tp = tcp_sk(sk); 1323 const struct dst_entry *dst = __sk_dst_get(sk); 1324 u32 mss_now; 1325 unsigned int header_len; 1326 struct tcp_out_options opts; 1327 struct tcp_md5sig_key *md5; 1328 1329 mss_now = tp->mss_cache; 1330 1331 if (dst) { 1332 u32 mtu = dst_mtu(dst); 1333 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 1334 mss_now = tcp_sync_mss(sk, mtu); 1335 } 1336 1337 header_len = tcp_established_options(sk, NULL, &opts, &md5) + 1338 sizeof(struct tcphdr); 1339 /* The mss_cache is sized based on tp->tcp_header_len, which assumes 1340 * some common options. If this is an odd packet (because we have SACK 1341 * blocks etc) then our calculated header_len will be different, and 1342 * we have to adjust mss_now correspondingly */ 1343 if (header_len != tp->tcp_header_len) { 1344 int delta = (int) header_len - tp->tcp_header_len; 1345 mss_now -= delta; 1346 } 1347 1348 return mss_now; 1349} 1350 1351/* Congestion window validation. (RFC2861) */ 1352static void tcp_cwnd_validate(struct sock *sk) 1353{ 1354 struct tcp_sock *tp = tcp_sk(sk); 1355 1356 if (tp->packets_out >= tp->snd_cwnd) { 1357 /* Network is feed fully. */ 1358 tp->snd_cwnd_used = 0; 1359 tp->snd_cwnd_stamp = tcp_time_stamp; 1360 } else { 1361 /* Network starves. */ 1362 if (tp->packets_out > tp->snd_cwnd_used) 1363 tp->snd_cwnd_used = tp->packets_out; 1364 1365 if (sysctl_tcp_slow_start_after_idle && 1366 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1367 tcp_cwnd_application_limited(sk); 1368 } 1369} 1370 1371/* Returns the portion of skb which can be sent right away without 1372 * introducing MSS oddities to segment boundaries. In rare cases where 1373 * mss_now != mss_cache, we will request caller to create a small skb 1374 * per input skb which could be mostly avoided here (if desired). 1375 * 1376 * We explicitly want to create a request for splitting write queue tail 1377 * to a small skb for Nagle purposes while avoiding unnecessary modulos, 1378 * thus all the complexity (cwnd_len is always MSS multiple which we 1379 * return whenever allowed by the other factors). Basically we need the 1380 * modulo only when the receiver window alone is the limiting factor or 1381 * when we would be allowed to send the split-due-to-Nagle skb fully. 
1382 */ 1383static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, 1384 unsigned int mss_now, unsigned int max_segs) 1385{ 1386 const struct tcp_sock *tp = tcp_sk(sk); 1387 u32 needed, window, max_len; 1388 1389 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1390 max_len = mss_now * max_segs; 1391 1392 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 1393 return max_len; 1394 1395 needed = min(skb->len, window); 1396 1397 if (max_len <= needed) 1398 return max_len; 1399 1400 return needed - needed % mss_now; 1401} 1402 1403/* Can at least one segment of SKB be sent right now, according to the 1404 * congestion window rules? If so, return how many segments are allowed. 1405 */ 1406static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1407 const struct sk_buff *skb) 1408{ 1409 u32 in_flight, cwnd; 1410 1411 /* Don't be strict about the congestion window for the final FIN. */ 1412 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 1413 tcp_skb_pcount(skb) == 1) 1414 return 1; 1415 1416 in_flight = tcp_packets_in_flight(tp); 1417 cwnd = tp->snd_cwnd; 1418 if (in_flight < cwnd) 1419 return (cwnd - in_flight); 1420 1421 return 0; 1422} 1423 1424/* Initialize TSO state of a skb. 1425 * This must be invoked the first time we consider transmitting 1426 * SKB onto the wire. 1427 */ 1428static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, 1429 unsigned int mss_now) 1430{ 1431 int tso_segs = tcp_skb_pcount(skb); 1432 1433 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1434 tcp_set_skb_tso_segs(sk, skb, mss_now); 1435 tso_segs = tcp_skb_pcount(skb); 1436 } 1437 return tso_segs; 1438} 1439 1440/* Minshall's variant of the Nagle send check. */ 1441static inline bool tcp_minshall_check(const struct tcp_sock *tp) 1442{ 1443 return after(tp->snd_sml, tp->snd_una) && 1444 !after(tp->snd_sml, tp->snd_nxt); 1445} 1446 1447/* Return false, if packet can be sent now without violation Nagle's rules: 1448 * 1. It is full sized. 1449 * 2. Or it contains FIN. (already checked by caller) 1450 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1451 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1452 * With Minshall's modification: all sent small packets are ACKed. 1453 */ 1454static inline bool tcp_nagle_check(const struct tcp_sock *tp, 1455 const struct sk_buff *skb, 1456 unsigned int mss_now, int nonagle) 1457{ 1458 return skb->len < mss_now && 1459 ((nonagle & TCP_NAGLE_CORK) || 1460 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1461} 1462 1463/* Return true if the Nagle test allows this packet to be 1464 * sent now. 1465 */ 1466static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1467 unsigned int cur_mss, int nonagle) 1468{ 1469 /* Nagle rule does not apply to frames, which sit in the middle of the 1470 * write_queue (they have no chances to get new data). 1471 * 1472 * This is implemented in the callers, where they modify the 'nonagle' 1473 * argument based upon the location of SKB in the send queue. 1474 */ 1475 if (nonagle & TCP_NAGLE_PUSH) 1476 return true; 1477 1478 /* Don't use the nagle rule for urgent data (or for the final FIN). */ 1479 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1480 return true; 1481 1482 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1483 return true; 1484 1485 return false; 1486} 1487 1488/* Does at least the first segment of SKB fit into the send window? 
*/ 1489static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 1490 const struct sk_buff *skb, 1491 unsigned int cur_mss) 1492{ 1493 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1494 1495 if (skb->len > cur_mss) 1496 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1497 1498 return !after(end_seq, tcp_wnd_end(tp)); 1499} 1500 1501/* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1502 * should be put on the wire right now. If so, it returns the number of 1503 * packets allowed by the congestion window. 1504 */ 1505static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1506 unsigned int cur_mss, int nonagle) 1507{ 1508 const struct tcp_sock *tp = tcp_sk(sk); 1509 unsigned int cwnd_quota; 1510 1511 tcp_init_tso_segs(sk, skb, cur_mss); 1512 1513 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1514 return 0; 1515 1516 cwnd_quota = tcp_cwnd_test(tp, skb); 1517 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1518 cwnd_quota = 0; 1519 1520 return cwnd_quota; 1521} 1522 1523/* Test if sending is allowed right now. */ 1524bool tcp_may_send_now(struct sock *sk) 1525{ 1526 const struct tcp_sock *tp = tcp_sk(sk); 1527 struct sk_buff *skb = tcp_send_head(sk); 1528 1529 return skb && 1530 tcp_snd_test(sk, skb, tcp_current_mss(sk), 1531 (tcp_skb_is_last(sk, skb) ? 1532 tp->nonagle : TCP_NAGLE_PUSH)); 1533} 1534 1535/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1536 * which is put after SKB on the list. It is very much like 1537 * tcp_fragment() except that it may make several kinds of assumptions 1538 * in order to speed up the splitting operation. In particular, we 1539 * know that all the data is in scatter-gather pages, and that the 1540 * packet has never been sent out before (and thus is not cloned). 1541 */ 1542static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1543 unsigned int mss_now, gfp_t gfp) 1544{ 1545 struct sk_buff *buff; 1546 int nlen = skb->len - len; 1547 u8 flags; 1548 1549 /* All of a TSO frame must be composed of paged data. */ 1550 if (skb->len != skb->data_len) 1551 return tcp_fragment(sk, skb, len, mss_now); 1552 1553 buff = sk_stream_alloc_skb(sk, 0, gfp); 1554 if (unlikely(buff == NULL)) 1555 return -ENOMEM; 1556 1557 sk->sk_wmem_queued += buff->truesize; 1558 sk_mem_charge(sk, buff->truesize); 1559 buff->truesize += nlen; 1560 skb->truesize -= nlen; 1561 1562 /* Correct the sequence numbers. */ 1563 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1564 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1565 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1566 1567 /* PSH and FIN should only be set in the second packet. */ 1568 flags = TCP_SKB_CB(skb)->tcp_flags; 1569 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1570 TCP_SKB_CB(buff)->tcp_flags = flags; 1571 1572 /* This packet was never sent out yet, so no SACK bits. */ 1573 TCP_SKB_CB(buff)->sacked = 0; 1574 1575 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1576 skb_split(skb, buff, len); 1577 1578 /* Fix up tso_factor for both original and new SKB. */ 1579 tcp_set_skb_tso_segs(sk, skb, mss_now); 1580 tcp_set_skb_tso_segs(sk, buff, mss_now); 1581 1582 /* Link BUFF into the send queue. */ 1583 skb_header_release(buff); 1584 tcp_insert_write_queue_after(skb, buff, sk); 1585 1586 return 0; 1587} 1588 1589/* Try to defer sending, if possible, in order to minimize the amount 1590 * of TSO splitting we do. View it as a kind of TSO Nagle test. 1591 * 1592 * This algorithm is from John Heffner. 
1593 */ 1594static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1595{ 1596 struct tcp_sock *tp = tcp_sk(sk); 1597 const struct inet_connection_sock *icsk = inet_csk(sk); 1598 u32 send_win, cong_win, limit, in_flight; 1599 int win_divisor; 1600 1601 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1602 goto send_now; 1603 1604 if (icsk->icsk_ca_state != TCP_CA_Open) 1605 goto send_now; 1606 1607 /* Defer for less than two clock ticks. */ 1608 if (tp->tso_deferred && 1609 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1610 goto send_now; 1611 1612 in_flight = tcp_packets_in_flight(tp); 1613 1614 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1615 1616 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1617 1618 /* From in_flight test above, we know that cwnd > in_flight. */ 1619 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1620 1621 limit = min(send_win, cong_win); 1622 1623 /* If a full-sized TSO skb can be sent, do it. */ 1624 if (limit >= min_t(unsigned int, sk->sk_gso_max_size, 1625 sk->sk_gso_max_segs * tp->mss_cache)) 1626 goto send_now; 1627 1628 /* Middle in queue won't get any more data, full sendable already? */ 1629 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1630 goto send_now; 1631 1632 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1633 if (win_divisor) { 1634 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1635 1636 /* If at least some fraction of a window is available, 1637 * just use it. 1638 */ 1639 chunk /= win_divisor; 1640 if (limit >= chunk) 1641 goto send_now; 1642 } else { 1643 /* Different approach, try not to defer past a single 1644 * ACK. Receiver should ACK every other full sized 1645 * frame, so if we have space for more than 3 frames 1646 * then send now. 1647 */ 1648 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1649 goto send_now; 1650 } 1651 1652 /* Ok, it looks like it is advisable to defer. 1653 * Do not rearm the timer if already set to not break TCP ACK clocking. 1654 */ 1655 if (!tp->tso_deferred) 1656 tp->tso_deferred = 1 | (jiffies << 1); 1657 1658 return true; 1659 1660send_now: 1661 tp->tso_deferred = 0; 1662 return false; 1663} 1664 1665/* Create a new MTU probe if we are ready. 1666 * MTU probe is regularly attempting to increase the path MTU by 1667 * deliberately sending larger packets. This discovers routing 1668 * changes resulting in larger path MTUs. 1669 * 1670 * Returns 0 if we should wait to probe (no cwnd available), 1671 * 1 if a probe was sent, 1672 * -1 otherwise 1673 */ 1674static int tcp_mtu_probe(struct sock *sk) 1675{ 1676 struct tcp_sock *tp = tcp_sk(sk); 1677 struct inet_connection_sock *icsk = inet_csk(sk); 1678 struct sk_buff *skb, *nskb, *next; 1679 int len; 1680 int probe_size; 1681 int size_needed; 1682 int copy; 1683 int mss_now; 1684 1685 /* Not currently probing/verifying, 1686 * not in recovery, 1687 * have enough cwnd, and 1688 * not SACKing (the variable headers throw things off) */ 1689 if (!icsk->icsk_mtup.enabled || 1690 icsk->icsk_mtup.probe_size || 1691 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 1692 tp->snd_cwnd < 11 || 1693 tp->rx_opt.num_sacks || tp->rx_opt.dsack) 1694 return -1; 1695 1696 /* Very simple search strategy: just double the MSS. 
*/ 1697 mss_now = tcp_current_mss(sk); 1698 probe_size = 2 * tp->mss_cache; 1699 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 1700 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 1701 /* TODO: set timer for probe_converge_event */ 1702 return -1; 1703 } 1704 1705 /* Have enough data in the send queue to probe? */ 1706 if (tp->write_seq - tp->snd_nxt < size_needed) 1707 return -1; 1708 1709 if (tp->snd_wnd < size_needed) 1710 return -1; 1711 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 1712 return 0; 1713 1714 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1715 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1716 if (!tcp_packets_in_flight(tp)) 1717 return -1; 1718 else 1719 return 0; 1720 } 1721 1722 /* We're allowed to probe. Build it now. */ 1723 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 1724 return -1; 1725 sk->sk_wmem_queued += nskb->truesize; 1726 sk_mem_charge(sk, nskb->truesize); 1727 1728 skb = tcp_send_head(sk); 1729 1730 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1731 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1732 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 1733 TCP_SKB_CB(nskb)->sacked = 0; 1734 nskb->csum = 0; 1735 nskb->ip_summed = skb->ip_summed; 1736 1737 tcp_insert_write_queue_before(nskb, skb, sk); 1738 1739 len = 0; 1740 tcp_for_write_queue_from_safe(skb, next, sk) { 1741 copy = min_t(int, skb->len, probe_size - len); 1742 if (nskb->ip_summed) 1743 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1744 else 1745 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1746 skb_put(nskb, copy), 1747 copy, nskb->csum); 1748 1749 if (skb->len <= copy) { 1750 /* We've eaten all the data from this skb. 1751 * Throw it away. */ 1752 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1753 tcp_unlink_write_queue(skb, sk); 1754 sk_wmem_free_skb(sk, skb); 1755 } else { 1756 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 1757 ~(TCPHDR_FIN|TCPHDR_PSH); 1758 if (!skb_shinfo(skb)->nr_frags) { 1759 skb_pull(skb, copy); 1760 if (skb->ip_summed != CHECKSUM_PARTIAL) 1761 skb->csum = csum_partial(skb->data, 1762 skb->len, 0); 1763 } else { 1764 __pskb_trim_head(skb, copy); 1765 tcp_set_skb_tso_segs(sk, skb, mss_now); 1766 } 1767 TCP_SKB_CB(skb)->seq += copy; 1768 } 1769 1770 len += copy; 1771 1772 if (len >= probe_size) 1773 break; 1774 } 1775 tcp_init_tso_segs(sk, nskb, nskb->len); 1776 1777 /* We're ready to send. If this fails, the probe will 1778 * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 1779 TCP_SKB_CB(nskb)->when = tcp_time_stamp; 1780 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1781 /* Decrement cwnd here because we are sending 1782 * effectively two packets. */ 1783 tp->snd_cwnd--; 1784 tcp_event_new_data_sent(sk, nskb); 1785 1786 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1787 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 1788 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 1789 1790 return 1; 1791 } 1792 1793 return -1; 1794} 1795 1796/* This routine writes packets to the network. It advances the 1797 * send_head. This happens as incoming acks open up the remote 1798 * window for us. 1799 * 1800 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1801 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1802 * account rare use of URG, this is not a big flaw. 1803 * 1804 * Send at most one packet when push_one > 0. 
Temporarily ignore 1805 * cwnd limit to force at most one packet out when push_one == 2. 1806 1807 * Returns true, if no segments are in flight and we have queued segments, 1808 * but cannot send anything now because of SWS or another problem. 1809 */ 1810static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1811 int push_one, gfp_t gfp) 1812{ 1813 struct tcp_sock *tp = tcp_sk(sk); 1814 struct sk_buff *skb; 1815 unsigned int tso_segs, sent_pkts; 1816 int cwnd_quota; 1817 int result; 1818 1819 sent_pkts = 0; 1820 1821 if (!push_one) { 1822 /* Do MTU probing. */ 1823 result = tcp_mtu_probe(sk); 1824 if (!result) { 1825 return false; 1826 } else if (result > 0) { 1827 sent_pkts = 1; 1828 } 1829 } 1830 1831 while ((skb = tcp_send_head(sk))) { 1832 unsigned int limit; 1833 1834 1835 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1836 BUG_ON(!tso_segs); 1837 1838 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) 1839 goto repair; /* Skip network transmission */ 1840 1841 cwnd_quota = tcp_cwnd_test(tp, skb); 1842 if (!cwnd_quota) { 1843 if (push_one == 2) 1844 /* Force out a loss probe pkt. */ 1845 cwnd_quota = 1; 1846 else 1847 break; 1848 } 1849 1850 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1851 break; 1852 1853 if (tso_segs == 1) { 1854 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1855 (tcp_skb_is_last(sk, skb) ? 1856 nonagle : TCP_NAGLE_PUSH)))) 1857 break; 1858 } else { 1859 if (!push_one && tcp_tso_should_defer(sk, skb)) 1860 break; 1861 } 1862 1863 /* TSQ : sk_wmem_alloc accounts skb truesize, 1864 * including skb overhead. But thats OK. 1865 */ 1866 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { 1867 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1868 break; 1869 } 1870 limit = mss_now; 1871 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1872 limit = tcp_mss_split_point(sk, skb, mss_now, 1873 min_t(unsigned int, 1874 cwnd_quota, 1875 sk->sk_gso_max_segs)); 1876 1877 if (skb->len > limit && 1878 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 1879 break; 1880 1881 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1882 1883 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 1884 break; 1885 1886repair: 1887 /* Advance the send_head. This one is sent out. 1888 * This call will increment packets_out. 1889 */ 1890 tcp_event_new_data_sent(sk, skb); 1891 1892 tcp_minshall_update(tp, mss_now, skb); 1893 sent_pkts += tcp_skb_pcount(skb); 1894 1895 if (push_one) 1896 break; 1897 } 1898 1899 if (likely(sent_pkts)) { 1900 if (tcp_in_cwnd_reduction(sk)) 1901 tp->prr_out += sent_pkts; 1902 1903 /* Send one loss probe per tail loss episode. */ 1904 if (push_one != 2) 1905 tcp_schedule_loss_probe(sk); 1906 tcp_cwnd_validate(sk); 1907 return false; 1908 } 1909 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 1910} 1911 1912bool tcp_schedule_loss_probe(struct sock *sk) 1913{ 1914 struct inet_connection_sock *icsk = inet_csk(sk); 1915 struct tcp_sock *tp = tcp_sk(sk); 1916 u32 timeout, tlp_time_stamp, rto_time_stamp; 1917 u32 rtt = tp->srtt >> 3; 1918 1919 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) 1920 return false; 1921 /* No consecutive loss probes. */ 1922 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 1923 tcp_rearm_rto(sk); 1924 return false; 1925 } 1926 /* Don't do any loss probe on a Fast Open connection before 3WHS 1927 * finishes. 1928 */ 1929 if (sk->sk_state == TCP_SYN_RECV) 1930 return false; 1931 1932 /* TLP is only scheduled when next timer event is RTO. 
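 * The probe timeout (PTO) computed below is armed in place of the
 * pending RTO timer; if the RTO would have fired earlier, the probe is
 * simply scheduled at the RTO time instead (see the check further
 * down). As a hypothetical example, with a smoothed RTT of 20 ms and
 * exactly one packet in flight, the timeout works out to
 * max(2 * 20 ms, 1.5 * 20 ms + TCP_DELACK_MAX) = 230 ms, TCP_DELACK_MAX
 * being 200 ms.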
*/ 1933 if (icsk->icsk_pending != ICSK_TIME_RETRANS) 1934 return false; 1935 1936 /* Schedule a loss probe in 2*RTT for SACK capable connections 1937 * in Open state, that are either limited by cwnd or application. 1938 */ 1939 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || 1940 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 1941 return false; 1942 1943 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 1944 tcp_send_head(sk)) 1945 return false; 1946 1947 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 1948 * for delayed ack when there's one outstanding packet. 1949 */ 1950 timeout = rtt << 1; 1951 if (tp->packets_out == 1) 1952 timeout = max_t(u32, timeout, 1953 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 1954 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 1955 1956 /* If RTO is shorter, just schedule TLP in its place. */ 1957 tlp_time_stamp = tcp_time_stamp + timeout; 1958 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 1959 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 1960 s32 delta = rto_time_stamp - tcp_time_stamp; 1961 if (delta > 0) 1962 timeout = delta; 1963 } 1964 1965 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 1966 TCP_RTO_MAX); 1967 return true; 1968} 1969 1970/* When probe timeout (PTO) fires, send a new segment if one exists, else 1971 * retransmit the last segment. 1972 */ 1973void tcp_send_loss_probe(struct sock *sk) 1974{ 1975 struct tcp_sock *tp = tcp_sk(sk); 1976 struct sk_buff *skb; 1977 int pcount; 1978 int mss = tcp_current_mss(sk); 1979 int err = -1; 1980 1981 if (tcp_send_head(sk) != NULL) { 1982 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 1983 goto rearm_timer; 1984 } 1985 1986 /* At most one outstanding TLP retransmission. */ 1987 if (tp->tlp_high_seq) 1988 goto rearm_timer; 1989 1990 /* Retransmit last segment. */ 1991 skb = tcp_write_queue_tail(sk); 1992 if (WARN_ON(!skb)) 1993 goto rearm_timer; 1994 1995 pcount = tcp_skb_pcount(skb); 1996 if (WARN_ON(!pcount)) 1997 goto rearm_timer; 1998 1999 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 2000 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 2001 goto rearm_timer; 2002 skb = tcp_write_queue_tail(sk); 2003 } 2004 2005 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 2006 goto rearm_timer; 2007 2008 /* Probe with zero data doesn't trigger fast recovery. */ 2009 if (skb->len > 0) 2010 err = __tcp_retransmit_skb(sk, skb); 2011 2012 /* Record snd_nxt for loss detection. */ 2013 if (likely(!err)) 2014 tp->tlp_high_seq = tp->snd_nxt; 2015 2016rearm_timer: 2017 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2018 inet_csk(sk)->icsk_rto, 2019 TCP_RTO_MAX); 2020 2021 if (likely(!err)) 2022 NET_INC_STATS_BH(sock_net(sk), 2023 LINUX_MIB_TCPLOSSPROBES); 2024 return; 2025} 2026 2027/* Push out any pending frames which were held back due to 2028 * TCP_CORK or attempt at coalescing tiny packets. 2029 * The socket must be locked by the caller. 2030 */ 2031void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 2032 int nonagle) 2033{ 2034 /* If we are closed, the bytes will have to remain here. 2035 * In time closedown will finish, we empty the write queue and 2036 * all will be happy. 2037 */ 2038 if (unlikely(sk->sk_state == TCP_CLOSE)) 2039 return; 2040 2041 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 2042 sk_gfp_atomic(sk, GFP_ATOMIC))) 2043 tcp_check_probe_timer(sk); 2044} 2045 2046/* Send _single_ skb sitting at the send head. 
This function requires 2047 * true push pending frames to setup probe timer etc. 2048 */ 2049void tcp_push_one(struct sock *sk, unsigned int mss_now) 2050{ 2051 struct sk_buff *skb = tcp_send_head(sk); 2052 2053 BUG_ON(!skb || skb->len < mss_now); 2054 2055 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2056} 2057 2058/* This function returns the amount that we can raise the 2059 * usable window based on the following constraints 2060 * 2061 * 1. The window can never be shrunk once it is offered (RFC 793) 2062 * 2. We limit memory per socket 2063 * 2064 * RFC 1122: 2065 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 2066 * RECV.NEXT + RCV.WIN fixed until: 2067 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 2068 * 2069 * i.e. don't raise the right edge of the window until you can raise 2070 * it at least MSS bytes. 2071 * 2072 * Unfortunately, the recommended algorithm breaks header prediction, 2073 * since header prediction assumes th->window stays fixed. 2074 * 2075 * Strictly speaking, keeping th->window fixed violates the receiver 2076 * side SWS prevention criteria. The problem is that under this rule 2077 * a stream of single byte packets will cause the right side of the 2078 * window to always advance by a single byte. 2079 * 2080 * Of course, if the sender implements sender side SWS prevention 2081 * then this will not be a problem. 2082 * 2083 * BSD seems to make the following compromise: 2084 * 2085 * If the free space is less than the 1/4 of the maximum 2086 * space available and the free space is less than 1/2 mss, 2087 * then set the window to 0. 2088 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 2089 * Otherwise, just prevent the window from shrinking 2090 * and from being larger than the largest representable value. 2091 * 2092 * This prevents incremental opening of the window in the regime 2093 * where TCP is limited by the speed of the reader side taking 2094 * data out of the TCP receive queue. It does nothing about 2095 * those cases where the window is constrained on the sender side 2096 * because the pipeline is full. 2097 * 2098 * BSD also seems to "accidentally" limit itself to windows that are a 2099 * multiple of MSS, at least until the free space gets quite small. 2100 * This would appear to be a side effect of the mbuf implementation. 2101 * Combining these two algorithms results in the observed behavior 2102 * of having a fixed window size at almost all times. 2103 * 2104 * Below we obtain similar behavior by forcing the offered window to 2105 * a multiple of the mss when it is feasible to do so. 2106 * 2107 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 2108 * Regular options like TIMESTAMP are taken into account. 2109 */ 2110u32 __tcp_select_window(struct sock *sk) 2111{ 2112 struct inet_connection_sock *icsk = inet_csk(sk); 2113 struct tcp_sock *tp = tcp_sk(sk); 2114 /* MSS for the peer's data. Previous versions used mss_clamp 2115 * here. I don't know if the value based on our guesses 2116 * of peer's MSS is better for the performance. It's more correct 2117 * but may be worse for the performance because of rcv_mss 2118 * fluctuations. 
--SAW 1998/11/1 2119 */ 2120 int mss = icsk->icsk_ack.rcv_mss; 2121 int free_space = tcp_space(sk); 2122 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 2123 int window; 2124 2125 if (mss > full_space) 2126 mss = full_space; 2127 2128 if (free_space < (full_space >> 1)) { 2129 icsk->icsk_ack.quick = 0; 2130 2131 if (sk_under_memory_pressure(sk)) 2132 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 2133 4U * tp->advmss); 2134 2135 if (free_space < mss) 2136 return 0; 2137 } 2138 2139 if (free_space > tp->rcv_ssthresh) 2140 free_space = tp->rcv_ssthresh; 2141 2142 /* Don't do rounding if we are using window scaling, since the 2143 * scaled window will not line up with the MSS boundary anyway. 2144 */ 2145 window = tp->rcv_wnd; 2146 if (tp->rx_opt.rcv_wscale) { 2147 window = free_space; 2148 2149 /* Advertise enough space so that it won't get scaled away. 2150 * Import case: prevent zero window announcement if 2151 * 1<<rcv_wscale > mss. 2152 */ 2153 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 2154 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 2155 << tp->rx_opt.rcv_wscale); 2156 } else { 2157 /* Get the largest window that is a nice multiple of mss. 2158 * Window clamp already applied above. 2159 * If our current window offering is within 1 mss of the 2160 * free space we just keep it. This prevents the divide 2161 * and multiply from happening most of the time. 2162 * We also don't do any window rounding when the free space 2163 * is too small. 2164 */ 2165 if (window <= free_space - mss || window > free_space) 2166 window = (free_space / mss) * mss; 2167 else if (mss == full_space && 2168 free_space > window + (full_space >> 1)) 2169 window = free_space; 2170 } 2171 2172 return window; 2173} 2174 2175/* Collapses two adjacent SKB's during retransmission. */ 2176static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2177{ 2178 struct tcp_sock *tp = tcp_sk(sk); 2179 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 2180 int skb_size, next_skb_size; 2181 2182 skb_size = skb->len; 2183 next_skb_size = next_skb->len; 2184 2185 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 2186 2187 tcp_highest_sack_combine(sk, next_skb, skb); 2188 2189 tcp_unlink_write_queue(next_skb, sk); 2190 2191 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 2192 next_skb_size); 2193 2194 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 2195 skb->ip_summed = CHECKSUM_PARTIAL; 2196 2197 if (skb->ip_summed != CHECKSUM_PARTIAL) 2198 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 2199 2200 /* Update sequence range on original skb. */ 2201 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 2202 2203 /* Merge over control information. This moves PSH/FIN etc. over */ 2204 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 2205 2206 /* All done, get rid of second SKB and account for it so 2207 * packet counting does not break. 2208 */ 2209 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 2210 2211 /* changed transmit queue under us so clear hints */ 2212 tcp_clear_retrans_hints_partial(tp); 2213 if (next_skb == tp->retransmit_skb_hint) 2214 tp->retransmit_skb_hint = skb; 2215 2216 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2217 2218 sk_wmem_free_skb(sk, next_skb); 2219} 2220 2221/* Check if coalescing SKBs is legal. 
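 * An skb qualifies only if it covers a single segment (pcount == 1),
 * carries no paged data (nr_frags == 0), is not cloned, is not the
 * current send head, and has not been SACKed.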
*/ 2222static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2223{ 2224 if (tcp_skb_pcount(skb) > 1) 2225 return false; 2226 /* TODO: SACK collapsing could be used to remove this condition */ 2227 if (skb_shinfo(skb)->nr_frags != 0) 2228 return false; 2229 if (skb_cloned(skb)) 2230 return false; 2231 if (skb == tcp_send_head(sk)) 2232 return false; 2233 /* Some heurestics for collapsing over SACK'd could be invented */ 2234 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2235 return false; 2236 2237 return true; 2238} 2239 2240/* Collapse packets in the retransmit queue to make to create 2241 * less packets on the wire. This is only done on retransmission. 2242 */ 2243static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 2244 int space) 2245{ 2246 struct tcp_sock *tp = tcp_sk(sk); 2247 struct sk_buff *skb = to, *tmp; 2248 bool first = true; 2249 2250 if (!sysctl_tcp_retrans_collapse) 2251 return; 2252 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2253 return; 2254 2255 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2256 if (!tcp_can_collapse(sk, skb)) 2257 break; 2258 2259 space -= skb->len; 2260 2261 if (first) { 2262 first = false; 2263 continue; 2264 } 2265 2266 if (space < 0) 2267 break; 2268 /* Punt if not enough space exists in the first SKB for 2269 * the data in the second 2270 */ 2271 if (skb->len > skb_availroom(to)) 2272 break; 2273 2274 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2275 break; 2276 2277 tcp_collapse_retrans(sk, to); 2278 } 2279} 2280 2281/* This retransmits one SKB. Policy decisions and retransmit queue 2282 * state updates are done by the caller. Returns non-zero if an 2283 * error occurred which prevented the send. 2284 */ 2285int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 2286{ 2287 struct tcp_sock *tp = tcp_sk(sk); 2288 struct inet_connection_sock *icsk = inet_csk(sk); 2289 unsigned int cur_mss; 2290 2291 /* Inconslusive MTU probe */ 2292 if (icsk->icsk_mtup.probe_size) { 2293 icsk->icsk_mtup.probe_size = 0; 2294 } 2295 2296 /* Do not sent more than we queued. 1/4 is reserved for possible 2297 * copying overhead: fragmentation, tunneling, mangling etc. 2298 */ 2299 if (atomic_read(&sk->sk_wmem_alloc) > 2300 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2301 return -EAGAIN; 2302 2303 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2304 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2305 BUG(); 2306 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2307 return -ENOMEM; 2308 } 2309 2310 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 2311 return -EHOSTUNREACH; /* Routing failure or similar. */ 2312 2313 cur_mss = tcp_current_mss(sk); 2314 2315 /* If receiver has shrunk his window, and skb is out of 2316 * new window, do not retransmit it. The exception is the 2317 * case, when window is shrunk to zero. In this case 2318 * our retransmit serves as a zero window probe. 2319 */ 2320 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 2321 TCP_SKB_CB(skb)->seq != tp->snd_una) 2322 return -EAGAIN; 2323 2324 if (skb->len > cur_mss) { 2325 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 2326 return -ENOMEM; /* We'll try again later. 
*/ 2327 } else { 2328 int oldpcount = tcp_skb_pcount(skb); 2329 2330 if (unlikely(oldpcount > 1)) { 2331 tcp_init_tso_segs(sk, skb, cur_mss); 2332 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2333 } 2334 } 2335 2336 tcp_retrans_try_collapse(sk, skb, cur_mss); 2337 2338 /* Some Solaris stacks overoptimize and ignore the FIN on a 2339 * retransmit when old data is attached. So strip it off 2340 * since it is cheap to do so and saves bytes on the network. 2341 */ 2342 if (skb->len > 0 && 2343 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 2344 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 2345 if (!pskb_trim(skb, 0)) { 2346 /* Reuse, even though it does some unnecessary work */ 2347 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 2348 TCP_SKB_CB(skb)->tcp_flags); 2349 skb->ip_summed = CHECKSUM_NONE; 2350 } 2351 } 2352 2353 /* Make a copy, if the first transmission SKB clone we made 2354 * is still in somebody's hands, else make a clone. 2355 */ 2356 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2357 2358 /* make sure skb->data is aligned on arches that require it 2359 * and check if ack-trimming & collapsing extended the headroom 2360 * beyond what csum_start can cover. 2361 */ 2362 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2363 skb_headroom(skb) >= 0xFFFF)) { 2364 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2365 GFP_ATOMIC); 2366 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2367 -ENOBUFS; 2368 } else { 2369 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2370 } 2371} 2372 2373int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 2374{ 2375 struct tcp_sock *tp = tcp_sk(sk); 2376 int err = __tcp_retransmit_skb(sk, skb); 2377 2378 if (err == 0) { 2379 /* Update global TCP statistics. */ 2380 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 2381 2382 tp->total_retrans++; 2383 2384#if FASTRETRANS_DEBUG > 0 2385 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2386 net_dbg_ratelimited("retrans_out leaked\n"); 2387 } 2388#endif 2389 if (!tp->retrans_out) 2390 tp->lost_retrans_low = tp->snd_nxt; 2391 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2392 tp->retrans_out += tcp_skb_pcount(skb); 2393 2394 /* Save stamp of the first retransmit. */ 2395 if (!tp->retrans_stamp) 2396 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2397 2398 tp->undo_retrans += tcp_skb_pcount(skb); 2399 2400 /* snd_nxt is stored to detect loss of retransmitted segment, 2401 * see tcp_input.c tcp_sacktag_write_queue(). 2402 */ 2403 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2404 } 2405 return err; 2406} 2407 2408/* Check if we forward retransmits are possible in the current 2409 * window/congestion state. 2410 */ 2411static bool tcp_can_forward_retransmit(struct sock *sk) 2412{ 2413 const struct inet_connection_sock *icsk = inet_csk(sk); 2414 const struct tcp_sock *tp = tcp_sk(sk); 2415 2416 /* Forward retransmissions are possible only during Recovery. */ 2417 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2418 return false; 2419 2420 /* No forward retransmissions in Reno are possible. */ 2421 if (tcp_is_reno(tp)) 2422 return false; 2423 2424 /* Yeah, we have to make difficult choice between forward transmission 2425 * and retransmission... Both ways have their merits... 2426 * 2427 * For now we do not retransmit anything, while we have some new 2428 * segments to send. In the other cases, follow rule 3 for 2429 * NextSeg() specified in RFC3517. 
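 * Concretely: if tcp_may_send_now() says new data could go out, we
 * return false here and let the new data be sent first.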
2430 */ 2431 2432 if (tcp_may_send_now(sk)) 2433 return false; 2434 2435 return true; 2436} 2437 2438/* This gets called after a retransmit timeout, and the initially 2439 * retransmitted data is acknowledged. It tries to continue 2440 * resending the rest of the retransmit queue, until either 2441 * we've sent it all or the congestion window limit is reached. 2442 * If doing SACK, the first ACK which comes back for a timeout 2443 * based retransmit packet might feed us FACK information again. 2444 * If so, we use it to avoid unnecessarily retransmissions. 2445 */ 2446void tcp_xmit_retransmit_queue(struct sock *sk) 2447{ 2448 const struct inet_connection_sock *icsk = inet_csk(sk); 2449 struct tcp_sock *tp = tcp_sk(sk); 2450 struct sk_buff *skb; 2451 struct sk_buff *hole = NULL; 2452 u32 last_lost; 2453 int mib_idx; 2454 int fwd_rexmitting = 0; 2455 2456 if (!tp->packets_out) 2457 return; 2458 2459 if (!tp->lost_out) 2460 tp->retransmit_high = tp->snd_una; 2461 2462 if (tp->retransmit_skb_hint) { 2463 skb = tp->retransmit_skb_hint; 2464 last_lost = TCP_SKB_CB(skb)->end_seq; 2465 if (after(last_lost, tp->retransmit_high)) 2466 last_lost = tp->retransmit_high; 2467 } else { 2468 skb = tcp_write_queue_head(sk); 2469 last_lost = tp->snd_una; 2470 } 2471 2472 tcp_for_write_queue_from(skb, sk) { 2473 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2474 2475 if (skb == tcp_send_head(sk)) 2476 break; 2477 /* we could do better than to assign each time */ 2478 if (hole == NULL) 2479 tp->retransmit_skb_hint = skb; 2480 2481 /* Assume this retransmit will generate 2482 * only one packet for congestion window 2483 * calculation purposes. This works because 2484 * tcp_retransmit_skb() will chop up the 2485 * packet to be MSS sized and all the 2486 * packet counting works out. 2487 */ 2488 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2489 return; 2490 2491 if (fwd_rexmitting) { 2492begin_fwd: 2493 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2494 break; 2495 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2496 2497 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2498 tp->retransmit_high = last_lost; 2499 if (!tcp_can_forward_retransmit(sk)) 2500 break; 2501 /* Backtrack if necessary to non-L'ed skb */ 2502 if (hole != NULL) { 2503 skb = hole; 2504 hole = NULL; 2505 } 2506 fwd_rexmitting = 1; 2507 goto begin_fwd; 2508 2509 } else if (!(sacked & TCPCB_LOST)) { 2510 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2511 hole = skb; 2512 continue; 2513 2514 } else { 2515 last_lost = TCP_SKB_CB(skb)->end_seq; 2516 if (icsk->icsk_ca_state != TCP_CA_Loss) 2517 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2518 else 2519 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2520 } 2521 2522 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2523 continue; 2524 2525 if (tcp_retransmit_skb(sk, skb)) { 2526 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2527 return; 2528 } 2529 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2530 2531 if (tcp_in_cwnd_reduction(sk)) 2532 tp->prr_out += tcp_skb_pcount(skb); 2533 2534 if (skb == tcp_write_queue_head(sk)) 2535 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2536 inet_csk(sk)->icsk_rto, 2537 TCP_RTO_MAX); 2538 } 2539} 2540 2541/* Send a fin. The caller locks the socket for us. This cannot be 2542 * allowed to fail queueing a FIN frame under any circumstances. 
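 * If unsent data is still queued, the FIN flag is simply tacked onto
 * the last skb (consuming one sequence number); otherwise a fresh skb
 * is allocated, retrying with yield() until memory is available, since
 * the FIN must never be dropped.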
2543 */ 2544void tcp_send_fin(struct sock *sk) 2545{ 2546 struct tcp_sock *tp = tcp_sk(sk); 2547 struct sk_buff *skb = tcp_write_queue_tail(sk); 2548 int mss_now; 2549 2550 /* Optimization, tack on the FIN if we have a queue of 2551 * unsent frames. But be careful about outgoing SACKS 2552 * and IP options. 2553 */ 2554 mss_now = tcp_current_mss(sk); 2555 2556 if (tcp_send_head(sk) != NULL) { 2557 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 2558 TCP_SKB_CB(skb)->end_seq++; 2559 tp->write_seq++; 2560 } else { 2561 /* Socket is locked, keep trying until memory is available. */ 2562 for (;;) { 2563 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2564 sk->sk_allocation); 2565 if (skb) 2566 break; 2567 yield(); 2568 } 2569 2570 /* Reserve space for headers and prepare control bits. */ 2571 skb_reserve(skb, MAX_TCP_HEADER); 2572 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2573 tcp_init_nondata_skb(skb, tp->write_seq, 2574 TCPHDR_ACK | TCPHDR_FIN); 2575 tcp_queue_skb(sk, skb); 2576 } 2577 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2578} 2579 2580/* We get here when a process closes a file descriptor (either due to 2581 * an explicit close() or as a byproduct of exit()'ing) and there 2582 * was unread data in the receive queue. This behavior is recommended 2583 * by RFC 2525, section 2.17. -DaveM 2584 */ 2585void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2586{ 2587 struct sk_buff *skb; 2588 2589 /* NOTE: No TCP options attached and we never retransmit this. */ 2590 skb = alloc_skb(MAX_TCP_HEADER, priority); 2591 if (!skb) { 2592 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2593 return; 2594 } 2595 2596 /* Reserve space for headers and prepare control bits. */ 2597 skb_reserve(skb, MAX_TCP_HEADER); 2598 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2599 TCPHDR_ACK | TCPHDR_RST); 2600 /* Send it off. */ 2601 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2602 if (tcp_transmit_skb(sk, skb, 0, priority)) 2603 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2604 2605 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2606} 2607 2608/* Send a crossed SYN-ACK during socket establishment. 2609 * WARNING: This routine must only be called when we have already sent 2610 * a SYN packet that crossed the incoming SYN that caused this routine 2611 * to get called. If this assumption fails then the initial rcv_wnd 2612 * and rcv_wscale values will not be correct. 2613 */ 2614int tcp_send_synack(struct sock *sk) 2615{ 2616 struct sk_buff *skb; 2617 2618 skb = tcp_write_queue_head(sk); 2619 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2620 pr_debug("%s: wrong queue state\n", __func__); 2621 return -EFAULT; 2622 } 2623 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2624 if (skb_cloned(skb)) { 2625 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2626 if (nskb == NULL) 2627 return -ENOMEM; 2628 tcp_unlink_write_queue(skb, sk); 2629 skb_header_release(nskb); 2630 __tcp_add_write_queue_head(sk, nskb); 2631 sk_wmem_free_skb(sk, skb); 2632 sk->sk_wmem_queued += nskb->truesize; 2633 sk_mem_charge(sk, nskb->truesize); 2634 skb = nskb; 2635 } 2636 2637 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2638 TCP_ECN_send_synack(tcp_sk(sk), skb); 2639 } 2640 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2641 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2642} 2643 2644/** 2645 * tcp_make_synack - Prepare a SYN-ACK. 
2646 * sk: listener socket 2647 * dst: dst entry attached to the SYNACK 2648 * req: request_sock pointer 2649 * 2650 * Allocate one skb and build a SYNACK packet. 2651 * @dst is consumed : Caller should not use it again. 2652 */ 2653struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2654 struct request_sock *req, 2655 struct tcp_fastopen_cookie *foc) 2656{ 2657 struct tcp_out_options opts; 2658 struct inet_request_sock *ireq = inet_rsk(req); 2659 struct tcp_sock *tp = tcp_sk(sk); 2660 struct tcphdr *th; 2661 struct sk_buff *skb; 2662 struct tcp_md5sig_key *md5; 2663 int tcp_header_size; 2664 int mss; 2665 2666 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 2667 if (unlikely(!skb)) { 2668 dst_release(dst); 2669 return NULL; 2670 } 2671 /* Reserve space for headers. */ 2672 skb_reserve(skb, MAX_TCP_HEADER); 2673 2674 skb_dst_set(skb, dst); 2675 security_skb_owned_by(skb, sk); 2676 2677 mss = dst_metric_advmss(dst); 2678 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2679 mss = tp->rx_opt.user_mss; 2680 2681 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2682 __u8 rcv_wscale; 2683 /* Set this up on the first call only */ 2684 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2685 2686 /* limit the window selection if the user enforce a smaller rx buffer */ 2687 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2688 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) 2689 req->window_clamp = tcp_full_space(sk); 2690 2691 /* tcp_full_space because it is guaranteed to be the first packet */ 2692 tcp_select_initial_window(tcp_full_space(sk), 2693 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2694 &req->rcv_wnd, 2695 &req->window_clamp, 2696 ireq->wscale_ok, 2697 &rcv_wscale, 2698 dst_metric(dst, RTAX_INITRWND)); 2699 ireq->rcv_wscale = rcv_wscale; 2700 } 2701 2702 memset(&opts, 0, sizeof(opts)); 2703#ifdef CONFIG_SYN_COOKIES 2704 if (unlikely(req->cookie_ts)) 2705 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 2706 else 2707#endif 2708 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2709 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, 2710 foc) + sizeof(*th); 2711 2712 skb_push(skb, tcp_header_size); 2713 skb_reset_transport_header(skb); 2714 2715 th = tcp_hdr(skb); 2716 memset(th, 0, sizeof(struct tcphdr)); 2717 th->syn = 1; 2718 th->ack = 1; 2719 TCP_ECN_make_synack(req, th); 2720 th->source = ireq->loc_port; 2721 th->dest = ireq->rmt_port; 2722 /* Setting of flags are superfluous here for callers (and ECE is 2723 * not even correctly set) 2724 */ 2725 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2726 TCPHDR_SYN | TCPHDR_ACK); 2727 2728 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2729 /* XXX data is queued and acked as is. No buffer/window check */ 2730 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 2731 2732 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2733 th->window = htons(min(req->rcv_wnd, 65535U)); 2734 tcp_options_write((__be32 *)(th + 1), tp, &opts); 2735 th->doff = (tcp_header_size >> 2); 2736 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); 2737 2738#ifdef CONFIG_TCP_MD5SIG 2739 /* Okay, we have all we need - do the md5 hash if needed */ 2740 if (md5) { 2741 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 2742 md5, NULL, req, skb); 2743 } 2744#endif 2745 2746 return skb; 2747} 2748EXPORT_SYMBOL(tcp_make_synack); 2749 2750/* Do all connect socket setups that can be done AF independent. 
*/ 2751void tcp_connect_init(struct sock *sk) 2752{ 2753 const struct dst_entry *dst = __sk_dst_get(sk); 2754 struct tcp_sock *tp = tcp_sk(sk); 2755 __u8 rcv_wscale; 2756 2757 /* We'll fix this up when we get a response from the other end. 2758 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2759 */ 2760 tp->tcp_header_len = sizeof(struct tcphdr) + 2761 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 2762 2763#ifdef CONFIG_TCP_MD5SIG 2764 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2765 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2766#endif 2767 2768 /* If user gave his TCP_MAXSEG, record it to clamp */ 2769 if (tp->rx_opt.user_mss) 2770 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2771 tp->max_window = 0; 2772 tcp_mtup_init(sk); 2773 tcp_sync_mss(sk, dst_mtu(dst)); 2774 2775 if (!tp->window_clamp) 2776 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2777 tp->advmss = dst_metric_advmss(dst); 2778 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2779 tp->advmss = tp->rx_opt.user_mss; 2780 2781 tcp_initialize_rcv_mss(sk); 2782 2783 /* limit the window selection if the user enforce a smaller rx buffer */ 2784 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2785 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 2786 tp->window_clamp = tcp_full_space(sk); 2787 2788 tcp_select_initial_window(tcp_full_space(sk), 2789 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2790 &tp->rcv_wnd, 2791 &tp->window_clamp, 2792 sysctl_tcp_window_scaling, 2793 &rcv_wscale, 2794 dst_metric(dst, RTAX_INITRWND)); 2795 2796 tp->rx_opt.rcv_wscale = rcv_wscale; 2797 tp->rcv_ssthresh = tp->rcv_wnd; 2798 2799 sk->sk_err = 0; 2800 sock_reset_flag(sk, SOCK_DONE); 2801 tp->snd_wnd = 0; 2802 tcp_init_wl(tp, 0); 2803 tp->snd_una = tp->write_seq; 2804 tp->snd_sml = tp->write_seq; 2805 tp->snd_up = tp->write_seq; 2806 tp->snd_nxt = tp->write_seq; 2807 2808 if (likely(!tp->repair)) 2809 tp->rcv_nxt = 0; 2810 tp->rcv_wup = tp->rcv_nxt; 2811 tp->copied_seq = tp->rcv_nxt; 2812 2813 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2814 inet_csk(sk)->icsk_retransmits = 0; 2815 tcp_clear_retrans(tp); 2816} 2817 2818static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 2819{ 2820 struct tcp_sock *tp = tcp_sk(sk); 2821 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 2822 2823 tcb->end_seq += skb->len; 2824 skb_header_release(skb); 2825 __tcp_add_write_queue_tail(sk, skb); 2826 sk->sk_wmem_queued += skb->truesize; 2827 sk_mem_charge(sk, skb->truesize); 2828 tp->write_seq = tcb->end_seq; 2829 tp->packets_out += tcp_skb_pcount(skb); 2830} 2831 2832/* Build and send a SYN with data and (cached) Fast Open cookie. However, 2833 * queue a data-only packet after the regular SYN, such that regular SYNs 2834 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges 2835 * only the SYN sequence, the data are retransmitted in the first ACK. 2836 * If cookie is not cached or other error occurs, falls back to send a 2837 * regular SYN with Fast Open cookie request option. 
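 * The payload that can ride on the SYN is bounded by the cached path
 * MTU: space = __tcp_mtu_to_mss(pmtu_cookie) - MAX_TCP_OPTION_SPACE,
 * i.e. the full 40 bytes of option space are reserved for middleboxes
 * that add private options. With a hypothetical 1500-byte path MTU this
 * leaves roughly 1.4 KB of data in the SYN.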
2838 */ 2839static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 2840{ 2841 struct tcp_sock *tp = tcp_sk(sk); 2842 struct tcp_fastopen_request *fo = tp->fastopen_req; 2843 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; 2844 struct sk_buff *syn_data = NULL, *data; 2845 unsigned long last_syn_loss = 0; 2846 2847 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 2848 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, 2849 &syn_loss, &last_syn_loss); 2850 /* Recurring FO SYN losses: revert to regular handshake temporarily */ 2851 if (syn_loss > 1 && 2852 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { 2853 fo->cookie.len = -1; 2854 goto fallback; 2855 } 2856 2857 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) 2858 fo->cookie.len = -1; 2859 else if (fo->cookie.len <= 0) 2860 goto fallback; 2861 2862 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 2863 * user-MSS. Reserve maximum option space for middleboxes that add 2864 * private TCP options. The cost is reduced data space in SYN :( 2865 */ 2866 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) 2867 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2868 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - 2869 MAX_TCP_OPTION_SPACE; 2870 2871 syn_data = skb_copy_expand(syn, skb_headroom(syn), space, 2872 sk->sk_allocation); 2873 if (syn_data == NULL) 2874 goto fallback; 2875 2876 for (i = 0; i < iovlen && syn_data->len < space; ++i) { 2877 struct iovec *iov = &fo->data->msg_iov[i]; 2878 unsigned char __user *from = iov->iov_base; 2879 int len = iov->iov_len; 2880 2881 if (syn_data->len + len > space) 2882 len = space - syn_data->len; 2883 else if (i + 1 == iovlen) 2884 /* No more data pending in inet_wait_for_connect() */ 2885 fo->data = NULL; 2886 2887 if (skb_add_data(syn_data, from, len)) 2888 goto fallback; 2889 } 2890 2891 /* Queue a data-only packet after the regular SYN for retransmission */ 2892 data = pskb_copy(syn_data, sk->sk_allocation); 2893 if (data == NULL) 2894 goto fallback; 2895 TCP_SKB_CB(data)->seq++; 2896 TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; 2897 TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); 2898 tcp_connect_queue_skb(sk, data); 2899 fo->copied = data->len; 2900 2901 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { 2902 tp->syn_data = (fo->copied > 0); 2903 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); 2904 goto done; 2905 } 2906 syn_data = NULL; 2907 2908fallback: 2909 /* Send a regular SYN with Fast Open cookie request option */ 2910 if (fo->cookie.len > 0) 2911 fo->cookie.len = 0; 2912 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 2913 if (err) 2914 tp->syn_fastopen = 0; 2915 kfree_skb(syn_data); 2916done: 2917 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 2918 return err; 2919} 2920 2921/* Build a SYN and send it off. */ 2922int tcp_connect(struct sock *sk) 2923{ 2924 struct tcp_sock *tp = tcp_sk(sk); 2925 struct sk_buff *buff; 2926 int err; 2927 2928 tcp_connect_init(sk); 2929 2930 if (unlikely(tp->repair)) { 2931 tcp_finish_connect(sk, NULL); 2932 return 0; 2933 } 2934 2935 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2936 if (unlikely(buff == NULL)) 2937 return -ENOBUFS; 2938 2939 /* Reserve space for headers. 
*/ 2940 skb_reserve(buff, MAX_TCP_HEADER); 2941 2942 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2943 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; 2944 tcp_connect_queue_skb(sk, buff); 2945 TCP_ECN_send_syn(sk, buff); 2946 2947 /* Send off SYN; include data in Fast Open. */ 2948 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 2949 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2950 if (err == -ECONNREFUSED) 2951 return err; 2952 2953 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2954 * in order to make this packet get counted in tcpOutSegs. 2955 */ 2956 tp->snd_nxt = tp->write_seq; 2957 tp->pushed_seq = tp->write_seq; 2958 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 2959 2960 /* Timer for repeating the SYN until an answer. */ 2961 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2962 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2963 return 0; 2964} 2965EXPORT_SYMBOL(tcp_connect); 2966 2967/* Send out a delayed ack, the caller does the policy checking 2968 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2969 * for details. 2970 */ 2971void tcp_send_delayed_ack(struct sock *sk) 2972{ 2973 struct inet_connection_sock *icsk = inet_csk(sk); 2974 int ato = icsk->icsk_ack.ato; 2975 unsigned long timeout; 2976 2977 if (ato > TCP_DELACK_MIN) { 2978 const struct tcp_sock *tp = tcp_sk(sk); 2979 int max_ato = HZ / 2; 2980 2981 if (icsk->icsk_ack.pingpong || 2982 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2983 max_ato = TCP_DELACK_MAX; 2984 2985 /* Slow path, intersegment interval is "high". */ 2986 2987 /* If some rtt estimate is known, use it to bound delayed ack. 2988 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2989 * directly. 2990 */ 2991 if (tp->srtt) { 2992 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 2993 2994 if (rtt < max_ato) 2995 max_ato = rtt; 2996 } 2997 2998 ato = min(ato, max_ato); 2999 } 3000 3001 /* Stay within the limit we were given */ 3002 timeout = jiffies + ato; 3003 3004 /* Use new timeout only if there wasn't a older one earlier. */ 3005 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 3006 /* If delack timer was blocked or is about to expire, 3007 * send ACK now. 3008 */ 3009 if (icsk->icsk_ack.blocked || 3010 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 3011 tcp_send_ack(sk); 3012 return; 3013 } 3014 3015 if (!time_before(timeout, icsk->icsk_ack.timeout)) 3016 timeout = icsk->icsk_ack.timeout; 3017 } 3018 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 3019 icsk->icsk_ack.timeout = timeout; 3020 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 3021} 3022 3023/* This routine sends an ack and also updates the window. */ 3024void tcp_send_ack(struct sock *sk) 3025{ 3026 struct sk_buff *buff; 3027 3028 /* If we have been reset, we may not send again. */ 3029 if (sk->sk_state == TCP_CLOSE) 3030 return; 3031 3032 /* We are not putting this on the write queue, so 3033 * tcp_transmit_skb() will set the ownership to this 3034 * sock. 3035 */ 3036 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 3037 if (buff == NULL) { 3038 inet_csk_schedule_ack(sk); 3039 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 3040 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 3041 TCP_DELACK_MAX, TCP_RTO_MAX); 3042 return; 3043 } 3044 3045 /* Reserve space for headers and prepare control bits. 
*/ 3046 skb_reserve(buff, MAX_TCP_HEADER); 3047 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 3048 3049 /* Send it off, this clears delayed acks for us. */ 3050 TCP_SKB_CB(buff)->when = tcp_time_stamp; 3051 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); 3052} 3053 3054/* This routine sends a packet with an out of date sequence 3055 * number. It assumes the other end will try to ack it. 3056 * 3057 * Question: what should we make while urgent mode? 3058 * 4.4BSD forces sending single byte of data. We cannot send 3059 * out of window data, because we have SND.NXT==SND.MAX... 3060 * 3061 * Current solution: to send TWO zero-length segments in urgent mode: 3062 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 3063 * out-of-date with SND.UNA-1 to probe window. 3064 */ 3065static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 3066{ 3067 struct tcp_sock *tp = tcp_sk(sk); 3068 struct sk_buff *skb; 3069 3070 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 3071 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 3072 if (skb == NULL) 3073 return -1; 3074 3075 /* Reserve space for headers and set control bits. */ 3076 skb_reserve(skb, MAX_TCP_HEADER); 3077 /* Use a previous sequence. This should cause the other 3078 * end to send an ack. Don't queue or clone SKB, just 3079 * send it. 3080 */ 3081 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 3082 TCP_SKB_CB(skb)->when = tcp_time_stamp; 3083 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 3084} 3085 3086void tcp_send_window_probe(struct sock *sk) 3087{ 3088 if (sk->sk_state == TCP_ESTABLISHED) { 3089 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3090 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; 3091 tcp_xmit_probe_skb(sk, 0); 3092 } 3093} 3094 3095/* Initiate keepalive or window probe from timer. */ 3096int tcp_write_wakeup(struct sock *sk) 3097{ 3098 struct tcp_sock *tp = tcp_sk(sk); 3099 struct sk_buff *skb; 3100 3101 if (sk->sk_state == TCP_CLOSE) 3102 return -1; 3103 3104 if ((skb = tcp_send_head(sk)) != NULL && 3105 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 3106 int err; 3107 unsigned int mss = tcp_current_mss(sk); 3108 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 3109 3110 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 3111 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 3112 3113 /* We are probing the opening of a window 3114 * but the window size is != 0 3115 * must have been a result SWS avoidance ( sender ) 3116 */ 3117 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 3118 skb->len > mss) { 3119 seg_size = min(seg_size, mss); 3120 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3121 if (tcp_fragment(sk, skb, seg_size, mss)) 3122 return -1; 3123 } else if (!tcp_skb_pcount(skb)) 3124 tcp_set_skb_tso_segs(sk, skb, mss); 3125 3126 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3127 TCP_SKB_CB(skb)->when = tcp_time_stamp; 3128 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3129 if (!err) 3130 tcp_event_new_data_sent(sk, skb); 3131 return err; 3132 } else { 3133 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 3134 tcp_xmit_probe_skb(sk, 1); 3135 return tcp_xmit_probe_skb(sk, 0); 3136 } 3137} 3138 3139/* A window probe timeout has occurred. If window is not closed send 3140 * a partial packet else a zero probe. 
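 * Each time the probe timer fires and the probe can be sent, the
 * back-off grows: the timer is re-armed at
 * min(icsk_rto << icsk_backoff, TCP_RTO_MAX), so with a hypothetical
 * 200 ms RTO the probes fire after roughly 400 ms, 800 ms, 1.6 s, ...
 * up to the TCP_RTO_MAX cap. If the probe could not be sent because of
 * local congestion, the back-off is left untouched and the timer is
 * re-armed within TCP_RESOURCE_PROBE_INTERVAL.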
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !tcp_send_head(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If the packet was not sent due to local congestion,
		 * do not back off and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * However, keep using the accumulated backoff.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}