/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

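/* fib_info objects are interned: routes with identical semantics share
 * a single fib_info.  Three hash tables index them: fib_info_hash keys
 * on the fields compared in fib_find_info(), fib_info_laddrhash keys on
 * the preferred source address, and fib_info_devhash keys on the
 * ifindex of each nexthop device.  All are protected by fib_info_lock.
 */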
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize the dummy loop away. */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
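
/* A usage sketch of the iterator macros above, matching how they are
 * used throughout this file:
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		...
 *	} endfor_nexthops(fi);
 *
 * for_nexthops() opens a scope and exposes "nh" (const) and "nhsel";
 * change_nexthops() exposes a mutable "nexthop_nh" instead.  The
 * closing brace lives in endfor_nexthops(), which is why every loop
 * must be terminated with it.
 */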


const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed: RCU_INIT_POINTER(*rtp, NULL);
	 * an RCU grace period has already elapsed before our caller,
	 * free_fib_info_rcu(), was invoked.
	 */

	dst_free(&rt->dst);
}

static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt)
			dst_free(&rt->dst);
	}
	free_percpu(rtp);
}

/* Release a fib_info record and the state hanging off its nexthops */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		if (nexthop_nh->nh_exceptions)
			free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	release_net(fi->fib_net);
	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

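/* Reference counting on a fib_info: fib_treeref counts references held
 * by FIB tree nodes and is manipulated under fib_info_lock, while
 * fib_clntref is an atomic count taken by lookup results.  When the
 * last tree reference goes away, fib_release_info() unhashes the entry
 * and marks it dead; free_fib_info() then defers the actual free past
 * an RCU grace period via free_fib_info_rcu().
 */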
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
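
/* fib_info_hashfn() folds in a subset of the fields that
 * fib_find_info() compares, which is sufficient: semantically equal
 * fib_infos always hash to the same bucket, while entries differing
 * only in unhashed fields (type, metrics, flags) are rejected by the
 * full comparison below.
 */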

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect acceptance routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS whose priority is
 * greater than or equal to PRIO (i.e. the position at which a
 * new alias with that TOS and priority would be inserted).
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}
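
/* This relies on the alias list being kept sorted by decreasing fa_tos
 * and, within one TOS value, by increasing fib_priority (an invariant
 * maintained by the insertion code in the FIB tables); a NULL return
 * means the new alias belongs at the tail.
 */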

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}
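
/* Layout of the RTA_MULTIPATH payload that the functions here walk
 * (one unit per nexthop, iterated with rtnh_ok()/rtnh_next()):
 *
 *	+------------------+--------------------------------+
 *	| struct rtnexthop | nested attributes, e.g.        |
 *	| (rtnh_len covers | RTA_GATEWAY and RTA_FLOW,      |
 *	|  the whole unit) | located via rtnh_attrs()       |
 *	+------------------+--------------------------------+
 */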

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}
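
/* Return convention of fib_nh_match(): 0 means cfg matches fi, 1 means
 * they differ, and a negative errno reports a malformed multipath
 * configuration.  Callers use it to pick which of several candidate
 * routes a netlink delete request refers to.
 */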


/*
 * Picture
 * -------
 *
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is really direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both gateway and interface are specified, they must not
 *    contradict each other.
 * d) with tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code
 * barely grows, but the result is much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which forces gw to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
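
/* Example of the gateway check above (illustrative addresses): for
 *	ip route add 10.0.0.0/8 via 192.168.1.1
 * fib_check_nh() looks up 192.168.1.1 with the scope tightened to at
 * least RT_SCOPE_LINK; the lookup must yield a unicast or local route
 * (e.g. the connected route for 192.168.1.0/24), otherwise the new
 * route is rejected with -EINVAL.
 */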

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
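
/* The nh_saddr computed above is a cached preferred source address;
 * the genid snapshot lets users notice that device addresses have
 * changed (net->ipv4.dev_addr_genid is bumped elsewhere on address
 * events) and refresh the cache lazily.
 */

/* fib_create_info() below, in outline: validate the route type against
 * its scope, grow the hash tables if needed, allocate the fib_info and
 * its nexthop array, parse metrics and nexthop attributes, resolve and
 * verify each nexthop via fib_check_nh(), then either reuse an
 * identical fib_info found by fib_find_info() or link the new one into
 * all three hash tables.
 */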

struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the weirdest cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				u32 val;

				if (type > RTAX_MAX)
					goto err_inval;
				val = nla_get_u32(nla);
				if (type == RTAX_ADVMSS && val > 65535 - 40)
					val = 65535 - 40;
				if (type == RTAX_MTU && val > 65535 - 15)
					val = 65535 - 15;
				fi->fib_metrics[type - 1] = val;
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update the FIB when:
 * - a local address disappears -> we must delete all the entries
 *   referring to it.
 * - a device goes down -> we must shut down all nexthops going
 *   through it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

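/* The force argument encodes how aggressively nexthops are killed:
 * with force == 0, nexthops whose scope is RT_SCOPE_NOWHERE (local
 * routes) are spared; any nonzero force drops that filter; and
 * force > 1 (used on device unregister; see the callers in
 * fib_frontend.c) kills the whole fib_info as soon as any of its
 * nexthops uses the device.
 */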
int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region.  */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
				tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}
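
/* fib_select_default() above walks the aliases sharing the default
 * prefix in priority order and, with the help of fib_detect_death(),
 * prefers the first gateway whose ARP entry still looks alive, falling
 * back to the best candidate seen (last_resort) when none does.
 * tb_default caches the order of the chosen route between invocations.
 */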

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device comes back up: wake up its dead nexthops.
 * This only makes sense for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}


	/* w should be a random number in [0..fi->fib_power-1];
	 * jiffies is a pretty bad approximation of one.
	 */

	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
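
/* Worked example of the weighting scheme above: two live nexthops with
 * weights 3 and 1 start a round with fib_power = 4 and nh_power = 3
 * and 1.  Each selection decrements the chosen nexthop's nh_power and
 * fib_power, so over one round nexthop 0 is picked three times and
 * nexthop 1 once, giving the intended 3:1 split before the round is
 * refilled.
 */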
#endif