/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a year in coma.
 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

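/* Default TTL for locally generated datagrams; this is a sysctl
 * (ip_default_ttl), so the initializer below is only the boot-time
 * default. */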
int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

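	/* If the destination has a cached link-layer header, copy it in
	 * under the read lock and use the cached output method; otherwise
	 * fall back to the neighbour output routine, which may still have
	 * to resolve the link-layer address first. */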
	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
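	/* Only software-fragment frames that exceed the path MTU and are
	 * not scheduled for TSO/UFO; offloaded frames are split by the
	 * hardware instead. */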
	if (skb->len > dst_mtu(skb->dst) &&
	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; ip_mr_input will drop
		   them in any case.
		   Note that local frames are looped back in order to be
		   delivered to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the transport layer's retransmit
			 * mechanism will keep trying until a route appears
			 * or the connection times itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
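	/* A single 16-bit store fills in version (4), header length (5
	 * 32-bit words; options are accounted for below) and TOS at once. */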
	*((__u16 *)iph)	= htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

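	/* If TSO later splits this skb into several segments, reserve a
	 * consecutive range of IP IDs, one for each segment. */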
	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each one an IP header plus a block of the data
 *	of the original IP data part) that will fit into a single device
 *	frame, and queue each such frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In such a case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* For bridged IP traffic encapsulated inside e.g. a VLAN header,
	 * we need to make room for the encapsulating header. */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
	/*
	 *	Fragment the datagram.
	 */

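	/* The fragment offset field counts in units of 8 bytes, hence the
	 * shifts by 3 here and below. */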
	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each one
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

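/* Copy user data from an iovec into an skb; when the device cannot
 * checksum in hardware (no CHECKSUM_HW), fold a software checksum into
 * skb->csum while copying. */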
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * create a single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb->nh.raw = skb->data;

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
			       (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);
	return err;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece is held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP but also other transport protocols - e.g. raw sockets -
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
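/*
 * Rough sketch of the usual calling sequence (UDP-style); the details are
 * the caller's business and this is only illustrative:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *			     sizeof(struct udphdr), &ipc, rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 */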
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
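	/* Largest frame length that still keeps the fragment data a
	 * multiple of eight bytes, as required for every fragment but the
	 * last. */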
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later on.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
			(rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs;
	 * each segment is an IP fragment ready to be sent to the network
	 * once the appropriate IP header has been added.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
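			/* fraggap: bytes at the tail of the previous skb
			 * that lie beyond maxfraglen; they are moved into
			 * the new fragment so that every fragment except
			 * the last stays a multiple of eight bytes. */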
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the
			 * tail. Note that with MSG_MORE we overallocate on
			 * fragments, because we have no idea which fragment
			 * will be the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

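/* Append a page of already-resident data (zero-copy, sendfile-style) to
 * the datagram being assembled by a preceding ip_append_data() call. */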
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO))
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);

	while (size > 0) {
		int i;

		if (skb_shinfo(skb)->ufo_size)
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

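	/* The remaining queued skbs are chained onto the head skb's
	 * frag_list; should the combined datagram exceed the MTU,
	 * ip_fragment() can then reuse this chain on its fast path. */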
	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting of the frame generated here. No matter how
	 * transforms change the size of the packet, it will still go out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single-threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BHs locally disabled and that sk cannot already be locked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);
1406