inet_timewait_sock.c revision f5715aea4564f233767ea1d944b2637a5fd7cd2e
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */
10
11#include <linux/kernel.h>
12#include <net/inet_hashtables.h>
13#include <net/inet_timewait_sock.h>
14#include <net/ip.h>
15
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	write_lock(lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(lock);

	/* Disassociate from the bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

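/* Drop a reference to a timewait bucket.  On the final put, run the
 * protocol's twsk destructor, release the netns reference and return
 * the bucket to its slab cache.
 */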
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
		struct module *owner = tw->tw_prot->owner;
		twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		release_net(twsk_net(tw));
		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
		module_put(owner);
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(lock);

	/* Step 2: Remove SK from the established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/* Step 3: Hash TW into the TIME_WAIT chain. */
	inet_twsk_add_node(tw, &ehead->twchain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

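/* Allocate a timewait bucket from the protocol's twsk slab cache and
 * seed it with the identity (addresses, ports, hash, flags) of the
 * socket that is entering TIME_WAIT.  May fail under memory pressure,
 * in which case NULL is returned.
 */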
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain. It was racy, because
	 * tw buckets are scheduled in a non-serialized context in 2.3
	 * (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

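/* Slow-timer handler: reap the current death-row slot.  If the kill
 * quota was exceeded, defer the rest of the slot to the twkill work
 * queue and keep the timer armed.
 */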
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

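/* Work-queue handler: drain every slot that inet_twdr_hangman() marked
 * in thread_slots, yielding the CPU between quota-sized batches.
 */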
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

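/* Arm the death timer for a timewait bucket.  Short timeouts go on the
 * fine-grained recycle wheel (twcal_row), long ones on the coarse
 * slow-timer wheel (cells); see the rationale below for the 3.5*RTO
 * timeout used with PAWS-capable peers.
 */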
void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent
	 * retransmitted FINs (or previous segments) are lost (the
	 * probability of such an event is p^(N+1), where p is the
	 * probability of losing a single packet, and the time to detect
	 * the loss is about RTO*(2^N - 1) with exponential backoff).
	 * The normal timewait length is calculated so that we wait at
	 * least for one retransmitted FIN (the maximal RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by
	 *   waiting only for 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this
	 * number is greater than the TS tick!) and detect old duplicates
	 * with the help of PAWS.
	 */
	/* Round the timeout up to whole recycle ticks. */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to the slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

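/* Recycle (fast) timer handler: walk the fine-grained wheel, kill every
 * bucket whose deadline has passed, and re-arm the timer at the first
 * slot that still lies in the future.
 */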
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

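/* Kill every TIME_WAIT socket belonging to the given netns and address
 * family, e.g. when a network namespace is dismantled.  The chain is
 * rescanned from the head after each kill because the bucket lock is
 * dropped in between.
 */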
void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_node *node;
	int h;

	local_bh_disable();
	for (h = 0; h < hashinfo->ehash_size; h++) {
		struct inet_ehash_bucket *head =
			inet_ehash_bucket(hashinfo, h);
		rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
restart:
		write_lock(lock);
		sk_for_each(sk, node, &head->twchain) {

			tw = inet_twsk(sk);
			if (!net_eq(twsk_net(tw), net) ||
			    tw->tw_family != family)
				continue;

			atomic_inc(&tw->tw_refcnt);
			write_unlock(lock);
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);

			goto restart;
		}
		write_unlock(lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);