/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/fib_rules.h>

static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
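
/*
 * Look up the fib_rules_ops registered for an address family and take a
 * reference on its owning module. Returns NULL if no ops are registered
 * for the family or the module is going away; callers drop the reference
 * with rules_ops_put().
 */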
static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
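
/*
 * Register a set of per-family rule operations. Rejects an ops with an
 * undersized rule_size or a missing mandatory callback (match, configure,
 * compare, fill, action) and returns -EEXIST if the family is already
 * claimed by another ops.
 */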
int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_register);
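
/*
 * A minimal registration sketch (illustrative only, not from this file;
 * the my_* names are hypothetical). A protocol embeds struct fib_rule at
 * the start of its own rule type and registers ops for its family, cf.
 * net/ipv4/fib_rules.c:
 *
 *	static LIST_HEAD(my_rules_list);
 *
 *	static struct fib_rules_ops my_rules_ops = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct my_fib_rule),
 *		.addr_size	= sizeof(u32),
 *		.match		= my_match,
 *		.configure	= my_configure,
 *		.compare	= my_compare,
 *		.fill		= my_fill,
 *		.action		= my_action,
 *		.policy		= my_policy,
 *		.rules_list	= &my_rules_list,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = fib_rules_register(&my_rules_ops);
 */
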
static void cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
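
/*
 * Unregister a family's rule operations and release all of its rules.
 * Waits for an RCU grace period so that concurrent lookups are finished
 * before the caller may free the ops or its rules.
 */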
int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_unregister);
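
/*
 * Check a single rule against a flow: the bound interface and the masked
 * firewall mark must match before the family-specific match() callback
 * is consulted. FIB_RULE_INVERT negates the final result.
 */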
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
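
/*
 * Walk the rules in ascending priority order and run the action of the
 * first matching rule. FR_ACT_GOTO jumps to its resolved target rule,
 * FR_ACT_NOP is skipped, and an action returning -EAGAIN continues the
 * walk. On success a reference to the matched rule is stored in arg.
 */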
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_lookup);
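
/*
 * Validate the address attributes of a rule message: a non-zero source
 * or destination prefix length requires a FRA_SRC/FRA_DST attribute of
 * exactly addr_size bytes and a prefix length within the family's
 * address width.
 */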
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
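
/*
 * RTM_NEWRULE handler: validates the netlink request, allocates and
 * fills a new rule, resolves goto targets in both directions, and
 * inserts the rule into the list ordered by preference.
 */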
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
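
/*
 * RTM_DELRULE handler: finds the first rule matching all attributes
 * given in the request, unlinks it and invalidates any goto rules
 * pointing at it before notifying listeners.
 */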
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this deleted rule is the target of any goto
		 * rules. If so, disable them. As this operation is
		 * eventually very expensive, it is only performed if
		 * goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
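
/*
 * Upper bound on the notification message size for a rule, including
 * any family-specific payload reported via ops->nlmsg_payload().
 */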
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
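
/*
 * Encode one rule as an RTM_{NEW,DEL}RULE netlink message, marking
 * unresolved goto targets and detached devices via header flags.
 */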
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
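
/*
 * Dump the rules of one family, resuming at the position stored in
 * cb->args[1] by a previous partial dump.
 */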
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
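
/*
 * RTM_GETRULE dump handler: dumps a single family when the request
 * names one, otherwise iterates over all registered families, keeping
 * the family position in cb->args[0].
 */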
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
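
/*
 * Broadcast a rule change to the family's netlink notification group.
 */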
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
}
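
/*
 * Bind rules referring to a device by name to its ifindex once it
 * appears; detach_rules() below resets the ifindex to -1 when the
 * device goes away, leaving the rule in place but non-matching.
 */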
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}
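
/*
 * Netdevice notifier: keep the name-to-ifindex binding of all rules in
 * sync as devices register and unregister.
 */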
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
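
/*
 * Wire up the rtnetlink handlers for rule manipulation and the
 * netdevice notifier at boot.
 */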
static int __init fib_rules_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);