inet_timewait_sock.c revision 7f635ab71eef8da012320c0092b662d6af8c1e69
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                             struct inet_hashinfo *hashinfo)
{
        struct inet_bind_hashbucket *bhead;
        struct inet_bind_bucket *tb;
        /* Unlink from established hashes. */
        rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        write_lock(lock);
        if (hlist_unhashed(&tw->tw_node)) {
                write_unlock(lock);
                return;
        }
        __hlist_del(&tw->tw_node);
        sk_node_init(&tw->tw_node);
        write_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tb = tw->tw_tb;
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
                       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
        }
#endif
        inet_twsk_put(tw);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt)) {
                struct module *owner = tw->tw_prot->owner;
                twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
                printk(KERN_DEBUG "%s timewait_sock %p released\n",
                       tw->tw_prot->name, tw);
#endif
                release_net(twsk_net(tw));
                kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
                module_put(owner);
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state.  This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;

        /* Step 1: Put TW into bind hash.  Original socket stays there too.
         * Note that any socket with inet->num != 0 MUST be bound in the
         * binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        BUG_TRAP(icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        write_lock(lock);

        /* Step 2: Remove SK from established hash. */
        if (__sk_del_node_init(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        /* Step 3: Hash TW into TIMEWAIT chain. */
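        /* The established-hash twchain now holds a reference on tw: the
         * atomic_inc() below pairs with the inet_twsk_put() that
         * __inet_twsk_kill() performs after unlinking the sock again.
         */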
        inet_twsk_add_node(tw, &ehead->twchain);
        atomic_inc(&tw->tw_refcnt);

        write_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);

                /* Give us an identity. */
                tw->tw_daddr        = inet->daddr;
                tw->tw_rcv_saddr    = inet->rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_num          = inet->num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->sport;
                tw->tw_dport        = inet->dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_prot         = sk->sk_prot_creator;
                twsk_net_set(tw, hold_net(sock_net(sk)));
                atomic_set(&tw->tw_refcnt, 1);
                inet_twsk_dead_node_init(tw);
                __module_get(tw->tw_prot->owner);
        }

        return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version, where the lock was
         * released after detaching the chain.  That was racy, because tw
         * buckets are scheduled in a non-serialized context in 2.3 (with
         * netfilter), and with softnet it is common, because soft irqs
         * are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

        return ret;
}

void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
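                /* tw_count counts every socket still scheduled on this
                 * death row (slow cells and recycle row alike); while it
                 * is non-zero, the periodic timer must be re-armed.
                 */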
                if (twdr->tw_count)
                        need_timer = 1;
        }
        twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
        struct inet_timewait_death_row *twdr =
                container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;

        BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
                        (sizeof(twdr->thread_slots) * 8));

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        struct inet_timewait_death_row *twdr,
                        const int timeo, const int timewait_len)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost.  If N subsequent
         * retransmitted FINs (or previous segments) are lost, the
         * probability of such an event is p^(N+1), where p is the
         * probability of losing a single packet, and the time to detect
         * the loss is about RTO*(2^N - 1) with exponential backoff.
         * The normal timewait length is calculated so that we wait for
         * at least one retransmitted FIN (the maximal RTO is 120 sec).
         * [ BTW Linux, following BSD, violates this requirement by
         *   waiting only 60 sec; we should wait at least 240 secs.
         *   Well, 240 consumes too much in the way of resources 8)
         * ]
         * This interval is not reduced, so as to catch old duplicates
         * and responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect old duplicates, we can
         * reduce the interval to the bounds required by RTO, rather
         * than MSL.  So, if the peer understands PAWS, we kill the tw
         * bucket after 3.5*RTO (it is important that this number is
         * greater than the TS tick!) and detect old duplicates with the
         * help of PAWS.
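         *
         * The shift just below rounds timeo up to whole recycle ticks:
         * slot = DIV_ROUND_UP(timeo, 1 << INET_TWDR_RECYCLE_TICK).
         * For instance, with a 32-jiffy recycle tick (i.e.
         * INET_TWDR_RECYCLE_TICK == 5; the actual value depends on HZ),
         * timeo = 100 jiffies gives slot = (100 + 31) >> 5 == 4.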
         */
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= timewait_len) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = DIV_ROUND_UP(timeo, twdr->period);
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                        (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, node, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
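
/*
 * Illustrative sketch only, not part of this file: roughly how a
 * transport protocol is expected to drive the helpers above when a
 * connection enters TIME_WAIT (compare the real caller,
 * tcp_time_wait() in net/ipv4/tcp_minisocks.c).  The function name and
 * parameters here are made up for the example; it must run with BHs
 * locally disabled, as __inet_twsk_hashdance() requires.
 */
#if 0
static void example_enter_time_wait(struct sock *sk, int substate, int timeo,
                                    struct inet_timewait_death_row *twdr,
                                    struct inet_hashinfo *hashinfo,
                                    int timewait_len)
{
        struct inet_timewait_sock *tw = inet_twsk_alloc(sk, substate);

        if (tw != NULL) {
                /* Put tw in the bind and established hashes and unhash sk. */
                __inet_twsk_hashdance(tw, sk, hashinfo);

                /* Hand tw to the death-row reaper. */
                inet_twsk_schedule(tw, twdr, timeo, timewait_len);

                /* Drop the reference inet_twsk_alloc() gave us; the hash
                 * table and the death row now hold their own references.
                 */
                inet_twsk_put(tw);
        }
        /* The caller then proceeds to destroy sk as usual. */
}
#endif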