linux-2.6: net/core/fib_rules.c
/*
 * net/core/fib_rules.c         Generic Routing Rules
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation, version 2.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/fib_rules.h>

static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

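/*
 * Find the fib_rules_ops registered for an address family and grab a
 * reference on its owning module.  Returns NULL if the family is not
 * registered or the module is being unloaded; the caller must drop the
 * reference with rules_ops_put().
 */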
static struct fib_rules_ops *lookup_rules_ops(int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

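/*
 * Register the rule operations for an address family.  The ops must
 * provide match, configure, compare, fill and action callbacks and a
 * rule_size large enough to hold a struct fib_rule; only one ops may be
 * registered per family.
 */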
int fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&rules_mod_lock);
        list_for_each_entry(o, &rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        list_add_tail_rcu(&ops->list, &rules_ops);
        err = 0;
errout:
        spin_unlock(&rules_mod_lock);

        return err;
}

EXPORT_SYMBOL_GPL(fib_rules_register);

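/* Unlink and release every rule still attached to an ops instance. */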
static void cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, ops->rules_list, list) {
                list_del_rcu(&rule->list);
                fib_rule_put(rule);
        }
}

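/*
 * Remove a previously registered ops from the global list, drop all of
 * its rules and wait for an RCU grace period so that concurrent lookups
 * have finished before the caller frees the ops.
 */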
int fib_rules_unregister(struct fib_rules_ops *ops)
{
        int err = 0;
        struct fib_rules_ops *o;

        spin_lock(&rules_mod_lock);
        list_for_each_entry(o, &rules_ops, list) {
                if (o == ops) {
                        list_del_rcu(&o->list);
                        cleanup_ops(ops);
                        goto out;
                }
        }

        err = -ENOENT;
out:
        spin_unlock(&rules_mod_lock);

        synchronize_rcu();

        return err;
}

EXPORT_SYMBOL_GPL(fib_rules_unregister);

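/*
 * Walk the rule list under RCU in order of preference.  Rules bound to a
 * different incoming interface or rejected by ->match() are skipped;
 * the first rule whose ->action() returns something other than -EAGAIN
 * terminates the walk and is returned in arg->rule with a reference
 * held.  Returns -ENETUNREACH if no rule gave a result.
 */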
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, ops->rules_list, list) {
                if (rule->ifindex && (rule->ifindex != fl->iif))
                        continue;

                if (!ops->match(rule, fl, flags))
                        continue;

                err = ops->action(rule, fl, flags, arg);
                if (err != -EAGAIN) {
                        fib_rule_get(rule);
                        arg->rule = rule;
                        goto out;
                }
        }

        err = -ENETUNREACH;
out:
        rcu_read_unlock();

        return err;
}

EXPORT_SYMBOL_GPL(fib_rules_lookup);

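/*
 * RTM_NEWRULE handler: parse the netlink message, allocate and configure
 * a new rule for the requested family, insert it into the rule list in
 * order of increasing preference and notify listeners.
 */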
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }

        if (tb[FRA_PRIORITY])
                rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

        if (tb[FRA_IFNAME]) {
                struct net_device *dev;

                rule->ifindex = -1;
                nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(rule->ifname);
                if (dev)
                        rule->ifindex = dev->ifindex;
        }

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);

        if (!rule->pref && ops->default_pref)
                rule->pref = ops->default_pref();

        err = ops->configure(rule, skb, nlh, frh, tb);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }

        fib_rule_get(rule);

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, ops->rules_list);

        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
        rules_ops_put(ops);
        return 0;

errout_free:
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}

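/*
 * RTM_DELRULE handler: locate the first rule matching the request,
 * refuse to delete permanent rules, unlink the rule under RCU and send
 * a notification before dropping the reference.
 */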
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        list_for_each_entry(rule, ops->rules_list, list) {
                if (frh->action && (frh->action != rule->action))
                        continue;

                if (frh->table && (frh_get_table(frh, tb) != rule->table))
                        continue;

                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;

                if (tb[FRA_IFNAME] &&
                    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
                        continue;

                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                list_del_rcu(&rule->list);
                synchronize_rcu();
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).pid);
                fib_rule_put(rule);
                rules_ops_put(ops);
                return 0;
        }

        err = -ENOENT;
errout:
        rules_ops_put(ops);
        return err;
}

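/*
 * Build a rule message in the given skb: generic header and attributes
 * first, then the family specific part via ops->fill().
 */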
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -1;

        frh = nlmsg_data(nlh);
        frh->table = rule->table;
        NLA_PUT_U32(skb, FRA_TABLE, rule->table);
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;

        if (rule->ifname[0])
                NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

        if (rule->pref)
                NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

        if (ops->fill(rule, skb, nlh, frh) < 0)
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        return nlmsg_cancel(skb, nlh);
}

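/*
 * Netlink dump callback: emit one RTM_NEWRULE message per rule of the
 * requested family, resuming from cb->args[0] on each invocation.
 */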
int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
{
        int idx = 0;
        struct fib_rule *rule;
        struct fib_rules_ops *ops;

        ops = lookup_rules_ops(family);
        if (ops == NULL)
                return -EAFNOSUPPORT;

        rcu_read_lock();
        list_for_each_entry(rule, ops->rules_list, list) {
                if (idx < cb->args[0])
                        goto skip;

                if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                     NLM_F_MULTI, ops) < 0)
                        break;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;
        rules_ops_put(ops);

        return skb->len;
}

EXPORT_SYMBOL_GPL(fib_rules_dump);

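/*
 * Multicast a rule change (RTM_NEWRULE/RTM_DELRULE) to the netlink group
 * of the owning ops; on failure the error is recorded on the rtnetlink
 * socket so userspace can detect the lost notification.
 */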
static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }

        err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
        if (err < 0)
                rtnl_set_sk_err(ops->nlgroup, err);
}

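/*
 * Bind rules that reference a device by name to its ifindex when the
 * device appears, and drop the binding again when it goes away.
 */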
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->ifindex == -1 &&
                    strcmp(dev->name, rule->ifname) == 0)
                        rule->ifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list)
                if (rule->ifindex == dev->ifindex)
                        rule->ifindex = -1;
}

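/*
 * Netdevice notifier: keep the ifindex binding of every family's rules
 * in sync as devices are registered and unregistered.  Runs under RTNL.
 */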
static int fib_rules_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
{
        struct net_device *dev = ptr;
        struct fib_rules_ops *ops;

        ASSERT_RTNL();
        rcu_read_lock();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &rules_ops, list)
                        attach_rules(ops->rules_list, dev);
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &rules_ops, list)
                        detach_rules(ops->rules_list, dev);
                break;
        }

        rcu_read_unlock();

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __init fib_rules_init(void)
{
        return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);