inet_timewait_sock.c revision 696ab2d3bffc746fb8cf3712f066d42b9886aeed
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/config.h>

#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/* Must be called with locally disabled BHs. */
void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent];

	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate from the bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

EXPORT_SYMBOL_GPL(__inet_twsk_kill);

/*
 * Enter the time wait state. This is called with locally disabled BHs.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in the
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	BUG_TRAP(icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
	inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
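
/*
 * Refcounting note on the dance above: only the Step 3 ehash linkage
 * takes its own reference (the atomic_inc); the Step 1 bind hash linkage
 * does not.  The reference obtained from inet_twsk_alloc() still belongs
 * to the caller when __inet_twsk_hashdance() returns.
 */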

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab,
							 SLAB_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hashent	    = sk->sk_hashent;
		tw->tw_ipv6only	    = 0;
		tw->tw_prot	    = sk->sk_prot_creator;
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);
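
/*
 * A minimal sketch of how a caller is expected to string these helpers
 * together (cf. tcp_time_wait(); "tcp_hashinfo", "tcp_death_row", "timeo"
 * and the TCP_TIMEWAIT_LEN bound stand in for the caller's own state):
 *
 *	struct inet_timewait_sock *tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *
 *	if (tw != NULL) {
 *		... copy protocol-private state from sk into tw ...
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);     (drops the inet_twsk_alloc() reference)
 *	}
 */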

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock was
	 * released after detaching the chain. That was racy, because tw
	 * buckets are scheduled in a non-serialized context in 2.3 (with
	 * netfilter), and it is common with softnet, because soft irqs
	 * are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}
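
		/* Note that the quota check runs after the increment, so a
		 * single call can reap up to INET_TWDR_TWKILL_QUOTA + 1
		 * sockets before bailing out. */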

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
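	/* If the reaper exceeded its quota, remember that this slot still
	 * has work pending and punt the remainder to the system workqueue
	 * (inet_twdr_twkill_work() below). */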
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		mb();
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);
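/*
 * twkill_slots_invalid() is deliberately left undefined: if the size
 * check at the top of inet_twdr_twkill_work() cannot be proven false
 * at compile time, the call survives and the build fails at link time.
 * It acts as a poor man's BUILD_BUG_ON().
 */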
extern void twkill_slots_invalid(void);

void inet_twdr_twkill_work(void *data)
{
	struct inet_timewait_death_row *twdr = data;
	int i;

	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
		twkill_slots_invalid();

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if the FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. The normal timewait length is calculated
	 * so that we wait at least for one retransmitted FIN (the maximal
	 * RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced, so as to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds required
	 * by the RTO rather than the MSL. So, if the peer understands PAWS,
	 * we kill the tw bucket after 3.5*RTO (it is important that this
	 * number is greater than the TS tick!) and detect old duplicates
	 * with the help of PAWS.
	 */
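	/* The shift below computes the ceiling of timeo in units of
	 * 2^INET_TWDR_RECYCLE_TICK jiffies. As an illustration only (the
	 * real tick value depends on HZ): with a tick of 5, i.e. 32-jiffy
	 * units, timeo = 100 gives slot = (100 + 31) >> 5 = 4. */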
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + twdr->period - 1) / twdr->period;
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

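/*
 * inet_twdr_twcal_tick() walks the fine-grained recycle wheel armed by
 * inet_twsk_schedule() above: it reaps every cell whose deadline has
 * passed and re-arms twcal_timer for the first cell still in the future.
 * Timeouts too long for the recycle wheel are handled by the coarse
 * per-period wheel in inet_twdr_hangman() instead.
 */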
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
