ip_output.c revision 5084205faf45384fff25c4cf77dd5c96279283ad
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

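/*
 * Note: any code that modifies an already-checksummed IP header (for
 * example a path that rewrites the TTL) must recompute the checksum
 * afterwards.  A minimal sketch:
 *
 *	iph->ttl--;
 *	ip_send_check(iph);
 */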
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}

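/*
 * Pick the TTL for a unicast packet: inet->uc_ttl is negative unless the
 * application set one explicitly (IP_TTL), in which case it wins over
 * the per-route RTAX_HOPLIMIT metric.
 */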
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an IP header to a skbuff and send it out.
 */
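/*
 * Typical caller (a sketch, assuming the inet_request_sock field names of
 * this kernel generation): a path that already holds a routed skb and a
 * full address pair, e.g. TCP transmitting a SYN-ACK:
 *
 *	err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 *				    ireq->rmt_addr, ireq->opt);
 */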
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

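/*
 * Transmit to the neighbour: use the cached, prebuilt link-layer header
 * (dst->hh) when one exists, otherwise go through the neighbour entry,
 * which resolves the hardware address (e.g. via ARP) as needed.
 */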
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that arrived here after forwarding; ip_mr_input would
		   drop them anyway.
		   Note that local frames are looped back so that they are
		   delivered to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

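/*
 * Queue a transport-layer segment for output.  This is the main xmit
 * entry point for connected sockets (TCP sends every segment through it):
 * it reuses the socket's cached route when possible, routes the packet
 * otherwise, builds the IP header and hands the result to netfilter /
 * dst_output().
 */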
int ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the transport layer's retransmit
			 * mechanism will keep trying until a route appears
			 * or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer has already set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of a size equal to the IP header plus a
 *	block of the original datagram's data) that will fit into a single
 *	device frame, and queue such frames for sending.
 */
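/*
 * Callers supply the routine that should transmit each fragment.  The
 * canonical call site is ip_finish_output() above (sketch):
 *
 *	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
 *		return ip_fragment(skb, ip_finish_output2);
 */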

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When a frag_list is given, use it.  First check its validity:
	 * some transformers could create a bogus frag_list or break an
	 * existing one; that is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying at the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

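/*
 * Slow path: allocate a fresh skb for every fragment and copy the data
 * into it with skb_copy_bits().
 */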
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and do it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit the fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: if we are fragmenting a fragment that's
		 *		  not the last fragment then keep MF set on
		 *		  every fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

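/*
 * getfrag() callback for user-space data: copy from an iovec into the
 * skb, accumulating a checksum on the fly unless the hardware will
 * checksum the packet for us (CHECKSUM_PARTIAL).
 */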
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb->nh.raw = skb->data;

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
			       (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path
	 */
	kfree_skb(skb);
	return err;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket until
 *	ip_push_pending_frames() is called.  Each piece can be a page or
 *	non-page data.
 *
 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 *	potentially use this interface as well.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
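/*
 * Typical datagram-style usage (a sketch, error handling elided):
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *			     sizeof(struct udphdr), &ipc, rt,
 *			     msg->msg_flags);
 *	if (!err && !(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 */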
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we
	 * expect it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
			(rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of
	 * skbs; each one is an IP fragment ready for sending to the
	 * network once an appropriate IP header has been added.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the
			 * tail.  Note, with MSG_MORE we overallocate on
			 * fragments, because we have no idea which fragment
			 * will be the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL)  {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}


	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into the current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the ip header from the ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
	 * we allow fragmenting the frame generated here.  No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}


/*
 *	Fetch data from kernel space and fill in the checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single-threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
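/*
 * Typical call site (a sketch; 'tcp_socket' names the per-protocol
 * control socket TCP uses for RSTs and timewait ACKs in this kernel
 * generation):
 *
 *	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 */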
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BHs locally disabled and that sk cannot already be
	   spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);
