ip_fragment.c revision 1e4b82873af0f21002e37a81ef063d2e5410deb3
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima          :       ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE: the logic of IP defragmentation now parallels the corresponding
 * IPv6 code. If you change something here, _PLEASE_ update
 * ipv6/reassembly.c as well. Or notify me, at least. --ANK
 */

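/* Upper bound on how many fragments may arrive from the same peer between
 * two fragments of one datagram before that datagram's queue is judged
 * stale by ip_frag_too_far().  Zero disables the check.
 */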
int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int             iif;
	unsigned int    rid;
	struct inet_peer *peer;
};

struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,

	/*
	 * Important NOTE! The fragment queue must be destroyed before the
	 * MSL expires. RFC 791 is wrong in proposing to prolong the timer
	 * by the TTL on each fragment arrival.
	 */
	.timeout	 = IP_FRAG_TIME,
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(void)
{
	return ip4_frags.nqueues;
}

int ip_frag_mem(void)
{
	return atomic_read(&ip4_frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

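/* Bucket selection for the shared fragment hash: the 16-bit id and the
 * protocol are packed into a single word and mixed with both addresses
 * and ip4_frags.rnd, a periodically rekeyed random seed that makes
 * bucket placement unpredictable to an attacker.
 */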
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip4_frags.mem);
	kfree_skb(skb);
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
	kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

	if (!qp)
		return NULL;
	atomic_add(sizeof(struct ipq), &ip4_frags.mem);
	return qp;
}

/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
	if (atomic_dec_and_test(&ipq->q.refcnt))
		inet_frag_destroy(&ipq->q, &ip4_frags, work);
}

/* Kill an ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  The evictor trashes the oldest
 * fragment queues until we are back under the threshold.
 */
static void ip_evictor(void)
{
	struct ipq *qp;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip4_frags.mem) - ip4_frags_ctl.low_thresh;
	if (work <= 0)
		return;

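	/* Each pass pins the oldest queue so it cannot be freed while the
	 * read lock is dropped, kills it under its own lock, and lets
	 * ipq_put() subtract the freed bytes from 'work'.
	 */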
	while (work > 0) {
		read_lock(&ip4_frags.lock);
		if (list_empty(&ip4_frags.lru_list)) {
			read_unlock(&ip4_frags.lock);
			return;
		}
		tmp = ip4_frags.lru_list.next;
		qp = list_entry(tmp, struct ipq, q.lru_list);
		atomic_inc(&qp->q.refcnt);
		read_unlock(&ip4_frags.lock);

		spin_lock(&qp->q.lock);
		if (!(qp->q.last_in & COMPLETE))
			ipq_kill(qp);
		spin_unlock(&qp->q.lock);

		ipq_put(qp, &work);
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

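	/* An ICMP time-exceeded error can only be sent if the first
	 * fragment is on hand, since the error must quote the offending
	 * datagram's header.
	 */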
	if ((qp->q.last_in & FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp, NULL);
}

/* Creation primitives. */

static struct ipq *ip_frag_intern(struct ipq *qp_in)
{
	struct ipq *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&ip4_frags.lock);
	hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
			 qp_in->protocol);
#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash table, because another CPU may
	 * have created such an entry while we moved from the read lock
	 * (held during lookup) to the write lock.
	 */
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == qp_in->id		&&
		    qp->saddr == qp_in->saddr	&&
		    qp->daddr == qp_in->daddr	&&
		    qp->protocol == qp_in->protocol &&
		    qp->user == qp_in->user) {
			atomic_inc(&qp->q.refcnt);
			write_unlock(&ip4_frags.lock);
			qp_in->q.last_in |= COMPLETE;
			ipq_put(qp_in, NULL);
			return qp;
		}
	}
#endif
	qp = qp_in;

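	/* Take one reference for the pending timer (mod_timer() returns
	 * zero only when the timer was not already armed) and one, below,
	 * for the hash table insertion.
	 */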
	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
		atomic_inc(&qp->q.refcnt);

	atomic_inc(&qp->q.refcnt);
	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
	INIT_LIST_HEAD(&qp->q.lru_list);
	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	ip4_frags.nqueues++;
	write_unlock(&ip4_frags.lock);
	return qp;
}

/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
{
	struct ipq *qp;

	if ((qp = frag_alloc_queue()) == NULL)
		goto out_nomem;

	qp->protocol = iph->protocol;
	qp->q.last_in = 0;
	qp->id = iph->id;
	qp->saddr = iph->saddr;
	qp->daddr = iph->daddr;
	qp->user = user;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;
	qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;

	/* Initialize a timer for this entry. */
	init_timer(&qp->q.timer);
	qp->q.timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->q.timer.function = ip_expire;	/* expire function	*/
	spin_lock_init(&qp->q.lock);
	atomic_set(&qp->q.refcnt, 1);

	return ip_frag_intern(qp);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
	return NULL;
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
	__be16 id = iph->id;
	__be32 saddr = iph->saddr;
	__be32 daddr = iph->daddr;
	__u8 protocol = iph->protocol;
	unsigned int hash;
	struct ipq *qp;
	struct hlist_node *n;

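	/* A queue is keyed on (id, saddr, daddr, protocol) from the IP
	 * header plus the defrag 'user' context, so different reassembly
	 * users (e.g. local delivery vs. conntrack) never share a queue.
	 */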
	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(id, saddr, daddr, protocol);
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == id		&&
		    qp->saddr == saddr	&&
		    qp->daddr == daddr	&&
		    qp->protocol == protocol &&
		    qp->user == user) {
			atomic_inc(&qp->q.refcnt);
			read_unlock(&ip4_frags.lock);
			return qp;
		}
	}
	read_unlock(&ip4_frags.lock);

	return ip_frag_create(iph, user);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;
	int rc;

	if (!peer || !max)
		return 0;

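	/* peer->rid is a serial number bumped for every fragment received
	 * from this peer; if more than 'max' fragments have gone by since
	 * this queue last saw one, the queue is presumed stale.  This
	 * guards against a wrapped 16-bit IP id matching an old queue.
	 */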
	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

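	/* Re-arm the timer.  A zero return from mod_timer() means the old
	 * timer had already fired, so the newly pending timer needs its
	 * own reference and the caller is told the queue timed out.
	 */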
	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add a new fragment to an existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

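	/* frag_off carries three flag bits (reserved, IP_DF, IP_MF) above
	 * a 13-bit fragment offset expressed in 8-byte units.
	 */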
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= LAST_IN;
		qp->q.len = end;
	} else {
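		/* A non-final fragment must cover a multiple of 8 bytes;
		 * trim any trailing slack and invalidate the hardware
		 * checksum, which no longer matches the trimmed data.
		 */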
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

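	/* Trim or drop any queued fragments that the new one overlaps at
	 * its tail end.
	 */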
	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat the head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &ip4_frags.mem);
	if (offset == 0)
		qp->q.last_in |= FIRST_IN;

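	/* Reassemble once both the first and last fragments have arrived
	 * and the bytes collected ('meat') cover the whole datagram.
	 */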
	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}

/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
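	/* A clone keeps the arriving skb's place in the chain while the
	 * skb itself is morphed into the old head, so the caller of
	 * ip_defrag() ends up holding the reassembled datagram in the
	 * skb it passed in.
	 */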
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG_CB(head)->offset == 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	err = -ENOMEM;
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip4_frags.mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &ip4_frags.mem);

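	/* Fold every remaining fragment's length and checksum into the
	 * head skb; mixed ip_summed states force a later software
	 * recompute, while CHECKSUM_COMPLETE values can simply be added.
	 */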
	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip4_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO
			"Oversized IP packet from %d.%d.%d.%d.\n",
			NIPQUAD(qp->saddr));
out_fail:
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
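/* Returns zero once the datagram is complete, in which case 'skb' now
 * holds the reassembled packet; any other return means the skb has been
 * consumed, either queued (-EINPROGRESS) or freed on error.  The local
 * delivery path, for instance, calls this roughly as (sketch):
 *
 *	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))
 *		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;
 */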
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;

	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
		ip_evictor();

	/* Lookup (or create) queue header */
	if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp, NULL);
		return ret;
	}

	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

void __init ipfrag_init(void)
{
	ip4_frags.ctl = &ip4_frags_ctl;
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);