tcp_minisocks.c revision 3f419d2d487821093ee46e898b5f8747f9edc9cd
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
static int tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;
	bool release_it;

	peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return seq == e_win && seq == end_seq;
}
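
/* Worked example (illustrative note, not in the original file): with
 * rcv_nxt == 1000 and rcv_wnd == 500, the receive window is [1000, 1500).
 *
 *	tcp_in_window(1000, 1000, 1000, 1500) == 1	bare ACK at the left edge
 *	tcp_in_window( 900, 1100, 1000, 1500) == 1	overlaps the window
 *	tcp_in_window( 800,  900, 1000, 1500) == 0	entirely old data
 *	tcp_in_window(1500, 1500, 1000, 1500) == 1	zero-length segment at
 *							the right edge
 *
 * The after()/before() comparisons are wrap-safe, so the checks also hold
 * across 32-bit sequence-number wraparound.
 */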

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) because one or
 *   more of our ACKs were lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it
 *   is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it
 *   exceeds the maximal retransmission timeout by enough to tolerate the
 *   loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Certainly, this is pure paranoia, but if we load TIME-WAIT with this
 *   semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (f.e. based on
 *   PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is _not_
 * stateless, which strictly speaking means we must take a spinlock on it.
 * I do not want to! The probability of misbehaviour is ridiculously low,
 * and it seems we could use some mb() tricks to avoid misreading sequence
 * numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am shamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are in no danger of being killed by delayed
	   old duplicates. The RFC check (that the SYN carries a newer
	   sequence number) works only at rates below ~40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover we may then
	   relax the silly sequence-space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN later
	   proves to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state. That
	   is not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
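
/* For orientation, an illustrative sketch (not part of this file) of how a
 * caller dispatches on the enum tcp_tw_status returned above; it is
 * condensed from the do_time_wait path of tcp_v4_rcv() in the same era,
 * and the variable and label names (sk, skb, th, iph, process,
 * no_tcp_socket, discard_it) come from that function.
 */
#if 0
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		/* Acceptable reopening SYN: find a listener and reprocess
		 * the segment against it, discarding the old bucket. */
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);	/* re-send the last ACK */
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;		/* answer with a reset */
	case TCP_TW_SUCCESS:
		break;				/* just discard the segment */
	}
	goto discard_it;
#endif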
297 */ 298void tcp_time_wait(struct sock *sk, int state, int timeo) 299{ 300 struct inet_timewait_sock *tw = NULL; 301 const struct inet_connection_sock *icsk = inet_csk(sk); 302 const struct tcp_sock *tp = tcp_sk(sk); 303 int recycle_ok = 0; 304 305 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) 306 recycle_ok = tcp_remember_stamp(sk); 307 308 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) 309 tw = inet_twsk_alloc(sk, state); 310 311 if (tw != NULL) { 312 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 313 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 314 315 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; 316 tcptw->tw_rcv_nxt = tp->rcv_nxt; 317 tcptw->tw_snd_nxt = tp->snd_nxt; 318 tcptw->tw_rcv_wnd = tcp_receive_window(tp); 319 tcptw->tw_ts_recent = tp->rx_opt.ts_recent; 320 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 321 322#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 323 if (tw->tw_family == PF_INET6) { 324 struct ipv6_pinfo *np = inet6_sk(sk); 325 struct inet6_timewait_sock *tw6; 326 327 tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot); 328 tw6 = inet6_twsk((struct sock *)tw); 329 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); 330 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); 331 tw->tw_ipv6only = np->ipv6only; 332 } 333#endif 334 335#ifdef CONFIG_TCP_MD5SIG 336 /* 337 * The timewait bucket does not have the key DB from the 338 * sock structure. We just make a quick copy of the 339 * md5 key being used (if indeed we are using one) 340 * so the timewait ack generating code has the key. 341 */ 342 do { 343 struct tcp_md5sig_key *key; 344 memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); 345 tcptw->tw_md5_keylen = 0; 346 key = tp->af_specific->md5_lookup(sk, sk); 347 if (key != NULL) { 348 memcpy(&tcptw->tw_md5_key, key->key, key->keylen); 349 tcptw->tw_md5_keylen = key->keylen; 350 if (tcp_alloc_md5sig_pool(sk) == NULL) 351 BUG(); 352 } 353 } while (0); 354#endif 355 356 /* Linkage updates. */ 357 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); 358 359 /* Get the TIME_WAIT timeout firing. */ 360 if (timeo < rto) 361 timeo = rto; 362 363 if (recycle_ok) { 364 tw->tw_timeout = rto; 365 } else { 366 tw->tw_timeout = TCP_TIMEWAIT_LEN; 367 if (state == TCP_TIME_WAIT) 368 timeo = TCP_TIMEWAIT_LEN; 369 } 370 371 inet_twsk_schedule(tw, &tcp_death_row, timeo, 372 TCP_TIMEWAIT_LEN); 373 inet_twsk_put(tw); 374 } else { 375 /* Sorry, if we're out of memory, just CLOSE this 376 * socket up. We've got bigger problems than 377 * non-graceful socket closings. 378 */ 379 LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); 380 } 381 382 tcp_update_metrics(sk); 383 tcp_done(sk); 384} 385 386void tcp_twsk_destructor(struct sock *sk) 387{ 388#ifdef CONFIG_TCP_MD5SIG 389 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 390 if (twsk->tw_md5_keylen) 391 tcp_free_md5sig_pool(); 392#endif 393} 394EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 395 396static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, 397 struct request_sock *req) 398{ 399 tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; 400} 401 402/* This is not only more efficient than what we used to do, it eliminates 403 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM 404 * 405 * Actually, we could lots of memory writes here. tp of listening 406 * socket contains all necessary default parameters. 
407 */ 408struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) 409{ 410 struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); 411 412 if (newsk != NULL) { 413 const struct inet_request_sock *ireq = inet_rsk(req); 414 struct tcp_request_sock *treq = tcp_rsk(req); 415 struct inet_connection_sock *newicsk = inet_csk(newsk); 416 struct tcp_sock *newtp = tcp_sk(newsk); 417 struct tcp_sock *oldtp = tcp_sk(sk); 418 struct tcp_cookie_values *oldcvp = oldtp->cookie_values; 419 420 /* TCP Cookie Transactions require space for the cookie pair, 421 * as it differs for each connection. There is no need to 422 * copy any s_data_payload stored at the original socket. 423 * Failure will prevent resuming the connection. 424 * 425 * Presumed copied, in order of appearance: 426 * cookie_in_always, cookie_out_never 427 */ 428 if (oldcvp != NULL) { 429 struct tcp_cookie_values *newcvp = 430 kzalloc(sizeof(*newtp->cookie_values), 431 GFP_ATOMIC); 432 433 if (newcvp != NULL) { 434 kref_init(&newcvp->kref); 435 newcvp->cookie_desired = 436 oldcvp->cookie_desired; 437 newtp->cookie_values = newcvp; 438 } else { 439 /* Not Yet Implemented */ 440 newtp->cookie_values = NULL; 441 } 442 } 443 444 /* Now setup tcp_sock */ 445 newtp->pred_flags = 0; 446 447 newtp->rcv_wup = newtp->copied_seq = 448 newtp->rcv_nxt = treq->rcv_isn + 1; 449 450 newtp->snd_sml = newtp->snd_una = 451 newtp->snd_nxt = newtp->snd_up = 452 treq->snt_isn + 1 + tcp_s_data_size(oldtp); 453 454 tcp_prequeue_init(newtp); 455 456 tcp_init_wl(newtp, treq->rcv_isn); 457 458 newtp->srtt = 0; 459 newtp->mdev = TCP_TIMEOUT_INIT; 460 newicsk->icsk_rto = TCP_TIMEOUT_INIT; 461 462 newtp->packets_out = 0; 463 newtp->retrans_out = 0; 464 newtp->sacked_out = 0; 465 newtp->fackets_out = 0; 466 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 467 468 /* So many TCP implementations out there (incorrectly) count the 469 * initial SYN frame in their delayed-ACK and congestion control 470 * algorithms that we must have the following bandaid to talk 471 * efficiently to them. 

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
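
/* Window-scale arithmetic in tcp_create_openreq_child(), worked through
 * (illustrative note, not in the original file): the window field of the
 * handshake-completing ACK is a raw 16-bit value that must be shifted by
 * the scale factor the peer advertised in its SYN. For example, with
 * ireq->snd_wscale == 7 and a header window of 512,
 * snd_wnd = 512 << 7 = 65536 bytes. If window scaling was not negotiated,
 * the shift is 0 and window_clamp is capped at 65535, the largest window
 * representable without the option (RFC 1323).
 */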

/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
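
	/* The age estimate above, worked through (illustrative note, not in
	 * the original file): SYN-ACKs are retransmitted with exponential
	 * backoff starting from TCP_TIMEOUT_INIT, so the request is at most
	 * about (TCP_TIMEOUT_INIT/HZ) << retrans seconds old. With the 3 s
	 * initial timeout of this era and req->retrans == 2, ts_recent is
	 * assumed to date back roughly 12 seconds. That is accurate enough
	 * for tcp_paws_reject(), which only cares about staleness on the
	 * scale of TCP_PAWS_MSL.
	 */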

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING. To be more exact,
		 * it says that we should send an ACK, because this
		 * segment (at least, if it has no data) is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT describe
		 * the SYN-RECV state. All the description is wrong; we
		 * cannot believe it and should rely only on common sense
		 * and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6 of
		 * RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* The code below further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test; the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	   this bare ACK.  Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming connection
	   and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK flag
	 * is set.  If there is no ACK, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket's backlog if the new socket is busy
 * (owned by the user); otherwise we just shortcircuit this and process it
 * directly in the context of the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
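
/* For orientation, an illustrative sketch (not part of this file) of how
 * the IPv4 receive path ties tcp_check_req() and tcp_child_process()
 * together; it is condensed from tcp_v4_do_rcv() and tcp_v4_hnd_req() of
 * the same era, whose names (nsk, rsk, discard, reset) are used below.
 */
#if 0
	if (sk->sk_state == TCP_LISTEN) {
		/* tcp_v4_hnd_req() looks the segment up in the listener's
		 * request queue and, on a match, runs tcp_check_req(). */
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);

		if (!nsk)
			goto discard;	/* NULL: segment consumed or dropped */

		if (nsk != sk) {
			/* tcp_check_req() created a child: let it eat the
			 * segment; a nonzero return means "send a reset". */
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
		/* nsk == sk: a plain SYN for the listener; fall through to
		 * tcp_rcv_state_process(). */
	}
#endif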