/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

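/*
 * fib_default_rule_add - add an initial catch-all rule for @ops
 *
 * Allocates a rule that sends lookups to @table and appends it to the
 * ops' rule list. Intended for initialization time, before the list is
 * reachable by lookups, which is why no locking is taken here.
 */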
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->uid_start = INVALID_UID;
	r->uid_end = INVALID_UID;
	r->fr_net = hold_net(ops->fro_net);

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

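/*
 * fib_default_rule_pref - pick a preference for a new rule
 *
 * Returns one less than the preference of the second rule in the list
 * (i.e. a slot just above the first user-visible rule), or 0 if no
 * such slot can be derived.
 */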
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

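/*
 * lookup_rules_ops - find the rules_ops registered for an address family
 *
 * Walks the per-namespace ops list under RCU and takes a module
 * reference on a match; the caller must drop it with rules_ops_put().
 */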
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

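/*
 * fib_rules_register - register a fib_rules_ops template for a namespace
 *
 * Duplicates @tmpl and links it into @net. A protocol typically calls
 * this from its pernet init. A minimal sketch of a caller (the ops
 * below are illustrative, not taken from any particular protocol):
 *
 *	static const struct fib_rules_ops example_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib_rule),
 *		.match		= example_rule_match,
 *		.configure	= example_rule_configure,
 *		.compare	= example_rule_compare,
 *		.fill		= example_rule_fill,
 *		.action		= example_rule_action,
 *		.default_pref	= fib_default_rule_pref,
 *		.nlgroup	= RTNLGRP_IPV4_RULE,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&example_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */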
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

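/*
 * fib_rules_unregister - tear down a previously registered fib_rules_ops
 *
 * Unlinks the ops and flushes its rules; the ops structure itself is
 * freed only after an RCU grace period so concurrent lookups stay safe.
 */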
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static inline kuid_t fib_nl_uid(struct nlattr *nla)
{
	return make_kuid(current_user_ns(), nla_get_u32(nla));
}

static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
{
	return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
}

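/*
 * fib_uid_range_match - check a flow's UID against a rule's UID range
 *
 * A rule with both ends set to INVALID_UID matches every flow;
 * otherwise the flow's UID must fall within [uid_start, uid_end].
 */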
static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
{
	return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
	       (uid_gte(fl->flowi_uid, rule->uid_start) &&
		uid_lte(fl->flowi_uid, rule->uid_end));
}

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (!fib_uid_range_match(fl, rule))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

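/*
 * fib_rules_lookup - run a flow through the rule list
 *
 * Walks the rules in preference order under RCU. GOTO rules jump to
 * their pre-resolved target, NOP rules are skipped, and the first
 * action that does not return -EAGAIN terminates the walk. Unless
 * FIB_LOOKUP_NOREF is set, a reference is taken on the matched rule
 * before it is returned in @arg.
 */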
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

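/*
 * validate_rulemsg - sanity-check the address attributes of a rule message
 *
 * If a source or destination prefix length is given, the corresponding
 * attribute must be present, sized for the family, and the prefix
 * length must not exceed the address width in bits.
 */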
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

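/*
 * fib_nl_newrule - handle RTM_NEWRULE
 *
 * Parses and validates the netlink request, builds the rule, inserts
 * it into the list ordered by preference, and resolves any pending
 * GOTO rules that were waiting for this preference value.
 */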
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	/* UID start and end must either both be valid or both unspecified. */
	rule->uid_start = rule->uid_end = INVALID_UID;
	if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
		if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
			rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
			rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
		}
		if (!uid_valid(rule->uid_start) ||
		    !uid_valid(rule->uid_end) ||
		    !uid_lte(rule->uid_start, rule->uid_end))
			goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

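/*
 * fib_nl_delrule - handle RTM_DELRULE
 *
 * Finds the first rule matching every attribute given in the request,
 * unlinks it, invalidates any GOTO rules that pointed at it, and
 * notifies listeners. Rules marked FIB_RULE_PERMANENT cannot be
 * deleted.
 */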
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_UID_START] &&
		    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
			continue;

		if (tb[FRA_UID_END] &&
		    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if the deleted rule is the target of any goto
		 * rules. If so, mark those rules unresolved. As this
		 * operation is potentially very expensive, it is only
		 * performed if goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

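/*
 * fib_rule_nlmsg_size - upper bound on the notification message size
 *
 * Accounts for the header and every generic attribute that may be
 * emitted, plus whatever the protocol reports via ->nlmsg_payload.
 */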
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size(4) /* FRA_UID_START */
			 + nla_total_size(4); /* FRA_UID_END */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

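/*
 * fib_nl_fill_rule - encode a rule into a netlink message
 *
 * Fills the fib_rule_hdr and the generic attributes, then lets the
 * protocol append its own via ->fill. Flags such as
 * FIB_RULE_UNRESOLVED and the *_DETACHED bits are derived from
 * current state rather than stored on the rule.
 */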
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (uid_valid(rule->uid_start) &&
	     nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
	    (uid_valid(rule->uid_end) &&
	     nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

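/*
 * dump_rules - dump one family's rules into a netlink skb
 *
 * cb->args[1] holds the index of the next rule to dump so that a
 * partially filled skb can be resumed on the following call.
 */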
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

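/*
 * fib_nl_dumprule - handle RTM_GETRULE dump requests
 *
 * For a specific family, dumps only that family's rules; for
 * AF_UNSPEC, iterates over all registered families, using cb->args[0]
 * as the resume point across calls.
 */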
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

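/*
 * notify_rule_change - multicast a rule add/delete to rtnetlink listeners
 *
 * Allocates a message sized by fib_rule_nlmsg_size(); an -EMSGSIZE
 * from fib_nl_fill_rule() therefore indicates a bug in the size
 * estimate.
 */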
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

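/*
 * attach_rules/detach_rules - re-bind rules to device ifindexes
 *
 * Rules reference devices by name; these helpers update the cached
 * ifindex when a matching device appears, is renamed, or goes away
 * (-1 marks a detached rule).
 */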
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

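/*
 * fib_rules_event - netdevice notifier
 *
 * Keeps every family's rules in sync with device registration,
 * renaming and unregistration in the device's namespace.
 */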
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);