2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
8 * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
14 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
17 * Alan Cox : Verify area fixes.
18 * Alan Cox : cli() protects routing changes
19 * Rui Oliveira : ICMP routing table updates
20 * (rco@di.uminho.pt) Routing table insertion and update
21 * Linus Torvalds : Rewrote bits to be sensible
22 * Alan Cox : Added BSD route gw semantics
23 * Alan Cox : Super /proc >4K
24 * Alan Cox : MTU in route table
25 * Alan Cox : MSS actually. Also added the window
27 * Sam Lantinga : Fixed route matching in rt_del()
28 * Alan Cox : Routing cache support.
29 * Alan Cox : Removed compatibility cruft.
30 * Alan Cox : RTF_REJECT support.
31 * Alan Cox : TCP irtt support.
32 * Jonathan Naylor : Added Metric support.
33 * Miquel van Smoorenburg : BSD API fixes.
34 * Miquel van Smoorenburg : Metrics.
35 * Alan Cox : Use __u32 properly
36 * Alan Cox : Aligned routing errors more closely with BSD
37 * our system is still very different.
38 * Alan Cox : Faster /proc handling
39 * Alexey Kuznetsov : Massive rework to support tree based routing,
40 * routing caches and better behaviour.
42 * Olaf Erb : irtt wasn't being copied right.
43 * Bjorn Ekwall : Kerneld route support.
44 * Alan Cox : Multicast fixed (I hope)
45 * Pavel Krauz : Limited broadcast fixed
46 * Mike McLagan : Routing by source
47 * Alexey Kuznetsov : End of old history. Split to fib.c and
48 * route.c and rewritten from scratch.
49 * Andi Kleen : Load-limit warning messages.
50 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
51 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
52 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
53 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
54 * Marc Boucher : routing by fwmark
55 * Robert Olsson : Added rt_cache statistics
56 * Arnaldo C. Melo : Convert proc stuff to seq_file
57 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
58 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
59 * Ilia Sotnikov : Removed TOS from hash calculations
61 * This program is free software; you can redistribute it and/or
62 * modify it under the terms of the GNU General Public License
63 * as published by the Free Software Foundation; either version
64 * 2 of the License, or (at your option) any later version.
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 #include <linux/bitops.h>
71 #include <linux/types.h>
72 #include <linux/kernel.h>
74 #include <linux/bootmem.h>
75 #include <linux/string.h>
76 #include <linux/socket.h>
77 #include <linux/sockios.h>
78 #include <linux/errno.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/proc_fs.h>
83 #include <linux/init.h>
84 #include <linux/workqueue.h>
85 #include <linux/skbuff.h>
86 #include <linux/inetdevice.h>
87 #include <linux/igmp.h>
88 #include <linux/pkt_sched.h>
89 #include <linux/mroute.h>
90 #include <linux/netfilter_ipv4.h>
91 #include <linux/random.h>
92 #include <linux/jhash.h>
93 #include <linux/rcupdate.h>
94 #include <linux/times.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/netevent.h>
107 #include <net/rtnetlink.h>
109 #include <linux/sysctl.h>
112 #define RT_FL_TOS(oldflp) \
113 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
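/*
 * RT_FL_TOS keeps only the TOS bits that matter for routing, plus the
 * RTO_ONLINK pseudo-flag that callers carry in the tos field of the
 * flow key to request a link-scoped lookup.
 */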
115 #define IP_MAX_MTU 0xFFF0
117 #define RT_GC_TIMEOUT (300*HZ)
119 static int ip_rt_min_delay = 2 * HZ;
120 static int ip_rt_max_delay = 10 * HZ;
121 static int ip_rt_max_size;
122 static int ip_rt_gc_timeout = RT_GC_TIMEOUT;
123 static int ip_rt_gc_interval = 60 * HZ;
124 static int ip_rt_gc_min_interval = HZ / 2;
125 static int ip_rt_redirect_number = 9;
126 static int ip_rt_redirect_load = HZ / 50;
127 static int ip_rt_redirect_silence = ((HZ / 50) << (9 + 1));
128 static int ip_rt_error_cost = HZ;
129 static int ip_rt_error_burst = 5 * HZ;
130 static int ip_rt_gc_elasticity = 8;
131 static int ip_rt_mtu_expires = 10 * 60 * HZ;
132 static int ip_rt_min_pmtu = 512 + 20 + 20;
133 static int ip_rt_min_advmss = 256;
134 static int ip_rt_secret_interval = 10 * 60 * HZ;
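/*
 * All of the delays and intervals above are in jiffies, and most are
 * exported as tunables under /proc/sys/net/ipv4/route/ by the sysctl
 * table at the bottom of this file. Some defaults interlock: with
 * HZ=1000, ip_rt_redirect_load is 20 ms and ip_rt_redirect_silence is
 * (HZ/50) << 10, roughly 20.5 seconds. ip_rt_min_pmtu (512 + 20 + 20)
 * presumably reads as 512 bytes of payload plus 20-byte IP and TCP
 * headers.
 */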
135 static unsigned long rt_deadline;
137 #define RTprint(a...) printk(KERN_DEBUG a)
139 static struct timer_list rt_flush_timer;
140 static void rt_check_expire(struct work_struct *work);
141 static DECLARE_DELAYED_WORK(expires_work, rt_check_expire);
142 static struct timer_list rt_secret_timer;
145 * Interface to generic destination cache.
148 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
149 static void ipv4_dst_destroy(struct dst_entry *dst);
150 static void ipv4_dst_ifdown(struct dst_entry *dst,
151 struct net_device *dev, int how);
152 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
153 static void ipv4_link_failure(struct sk_buff *skb);
154 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
155 static int rt_garbage_collect(void);
158 static struct dst_ops ipv4_dst_ops = {
160 .protocol = __constant_htons(ETH_P_IP),
161 .gc = rt_garbage_collect,
162 .check = ipv4_dst_check,
163 .destroy = ipv4_dst_destroy,
164 .ifdown = ipv4_dst_ifdown,
165 .negative_advice = ipv4_negative_advice,
166 .link_failure = ipv4_link_failure,
167 .update_pmtu = ip_rt_update_pmtu,
168 .entry_size = sizeof(struct rtable),
171 #define ECN_OR_COST(class) TC_PRIO_##class
173 const __u8 ip_tos2prio[16] = {
177 ECN_OR_COST(BESTEFFORT),
183 ECN_OR_COST(INTERACTIVE),
185 ECN_OR_COST(INTERACTIVE),
186 TC_PRIO_INTERACTIVE_BULK,
187 ECN_OR_COST(INTERACTIVE_BULK),
188 TC_PRIO_INTERACTIVE_BULK,
189 ECN_OR_COST(INTERACTIVE_BULK)
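/*
 * ip_tos2prio maps the four IP TOS bits to a TC_PRIO_* band. The index
 * is IPTOS_TOS(tos) >> 1 (see rt_tos2priority() in <net/route.h>), so
 * the low bit of the index is TOS bit 0x02, the "minimize cost" bit
 * that ECN reuses, hence the ECN_OR_COST() entries at the odd indices.
 * For example, IPTOS_LOWDELAY (0x10) indexes slot 8, TC_PRIO_INTERACTIVE.
 */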
197 /* The locking scheme is rather straightforward:
199 * 1) Read-Copy Update protects the buckets of the central route hash.
200 * 2) Only writers remove entries, and they hold the lock
201 * as they look at rtable reference counts.
202 * 3) Only readers acquire references to rtable entries,
203 * they do so with atomic increments and with the
207 struct rt_hash_bucket {
208 struct rtable *chain;
210 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
211 defined(CONFIG_PROVE_LOCKING)
213 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
214 * The size of this table is a power of two and depends on the number of CPUs.
215 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
217 #ifdef CONFIG_LOCKDEP
218 # define RT_HASH_LOCK_SZ 256
221 # define RT_HASH_LOCK_SZ 4096
223 # define RT_HASH_LOCK_SZ 2048
225 # define RT_HASH_LOCK_SZ 1024
227 # define RT_HASH_LOCK_SZ 512
229 # define RT_HASH_LOCK_SZ 256
233 static spinlock_t *rt_hash_locks;
234 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
235 # define rt_hash_lock_init() { \
237 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
238 if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
239 for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
240 spin_lock_init(&rt_hash_locks[i]); \
243 # define rt_hash_lock_addr(slot) NULL
244 # define rt_hash_lock_init()
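/*
 * Note that a bucket does not own a private lock; rt_hash_lock_addr()
 * maps bucket i to rt_hash_locks[i & (RT_HASH_LOCK_SZ - 1)], so distinct
 * buckets may share a lock. That is safe because the lock serializes
 * only writers; readers traverse the chains under RCU.
 */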
247 static struct rt_hash_bucket *rt_hash_table;
248 static unsigned rt_hash_mask;
249 static unsigned int rt_hash_log;
250 static unsigned int rt_hash_rnd;
252 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
253 #define RT_CACHE_STAT_INC(field) \
254 (__raw_get_cpu_var(rt_cache_stat).field++)
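/*
 * The counters are per-CPU and bumped without atomic operations; the
 * /proc/net/stat/rt_cache reader below does not sum them, it walks
 * every possible CPU and prints one row per CPU.
 */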
256 static int rt_intern_hash(unsigned hash, struct rtable *rth,
257 struct rtable **res);
259 static unsigned int rt_hash_code(u32 daddr, u32 saddr)
261 return (jhash_2words(daddr, saddr, rt_hash_rnd)
265 #define rt_hash(daddr, saddr, idx) \
266 rt_hash_code((__force u32)(__be32)(daddr),\
267 (__force u32)(__be32)(saddr) ^ ((idx) << 5))
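/*
 * The cache key is (daddr, saddr, interface index). Folding the index
 * into saddr (shifted left by 5) lets a single jhash_2words() call
 * cover all three inputs, and the rt_hash_rnd seed (periodically
 * regenerated when the cache is flushed) keeps remote hosts from
 * predicting bucket placement and flooding a single chain.
 */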
269 #ifdef CONFIG_PROC_FS
270 struct rt_cache_iter_state {
274 static struct rtable *rt_cache_get_first(struct seq_file *seq)
276 struct rtable *r = NULL;
277 struct rt_cache_iter_state *st = seq->private;
279 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
281 r = rt_hash_table[st->bucket].chain;
284 rcu_read_unlock_bh();
289 static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
291 struct rt_cache_iter_state *st = rcu_dereference(seq->private);
293 r = r->u.dst.rt_next;
295 rcu_read_unlock_bh();
296 if (--st->bucket < 0)
299 r = rt_hash_table[st->bucket].chain;
304 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
306 struct rtable *r = rt_cache_get_first(seq);
309 while (pos && (r = rt_cache_get_next(seq, r)))
311 return pos ? NULL : r;
314 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
316 return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
319 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
321 struct rtable *r = NULL;
323 if (v == SEQ_START_TOKEN)
324 r = rt_cache_get_first(seq);
326 r = rt_cache_get_next(seq, v);
331 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
333 if (v && v != SEQ_START_TOKEN)
334 rcu_read_unlock_bh();
337 static int rt_cache_seq_show(struct seq_file *seq, void *v)
339 if (v == SEQ_START_TOKEN)
340 seq_printf(seq, "%-127s\n",
341 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
342 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
345 struct rtable *r = v;
348 sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
349 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
350 r->u.dst.dev ? r->u.dst.dev->name : "*",
351 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
352 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
353 r->u.dst.__use, 0, (unsigned long)r->rt_src,
354 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
355 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
356 dst_metric(&r->u.dst, RTAX_WINDOW),
357 (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
358 dst_metric(&r->u.dst, RTAX_RTTVAR)),
360 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
361 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
364 seq_printf(seq, "%-127s\n", temp);
369 static const struct seq_operations rt_cache_seq_ops = {
370 .start = rt_cache_seq_start,
371 .next = rt_cache_seq_next,
372 .stop = rt_cache_seq_stop,
373 .show = rt_cache_seq_show,
376 static int rt_cache_seq_open(struct inode *inode, struct file *file)
378 return seq_open_private(file, &rt_cache_seq_ops,
379 sizeof(struct rt_cache_iter_state));
382 static const struct file_operations rt_cache_seq_fops = {
383 .owner = THIS_MODULE,
384 .open = rt_cache_seq_open,
387 .release = seq_release_private,
391 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
396 return SEQ_START_TOKEN;
398 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
399 if (!cpu_possible(cpu))
402 return &per_cpu(rt_cache_stat, cpu);
407 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
411 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
412 if (!cpu_possible(cpu))
415 return &per_cpu(rt_cache_stat, cpu);
421 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
426 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
428 struct rt_cache_stat *st = v;
430 if (v == SEQ_START_TOKEN) {
431 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
435 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
436 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
437 atomic_read(&ipv4_dst_ops.entries),
460 static const struct seq_operations rt_cpu_seq_ops = {
461 .start = rt_cpu_seq_start,
462 .next = rt_cpu_seq_next,
463 .stop = rt_cpu_seq_stop,
464 .show = rt_cpu_seq_show,
468 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
470 return seq_open(file, &rt_cpu_seq_ops);
473 static const struct file_operations rt_cpu_seq_fops = {
474 .owner = THIS_MODULE,
475 .open = rt_cpu_seq_open,
478 .release = seq_release,
481 #endif /* CONFIG_PROC_FS */
483 static __inline__ void rt_free(struct rtable *rt)
485 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
488 static __inline__ void rt_drop(struct rtable *rt)
491 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
494 static __inline__ int rt_fast_clean(struct rtable *rth)
496 /* Kill broadcast/multicast entries very aggressively, if they
497 collide in the hash table with more useful entries */
498 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
499 rth->fl.iif && rth->u.dst.rt_next;
502 static __inline__ int rt_valuable(struct rtable *rth)
504 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
508 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
513 if (atomic_read(&rth->u.dst.__refcnt))
517 if (rth->u.dst.expires &&
518 time_after_eq(jiffies, rth->u.dst.expires))
521 age = jiffies - rth->u.dst.lastuse;
523 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
524 (age <= tmo2 && rt_valuable(rth)))
530 /* Bits of score are:
531 * 31: very valuable
532 * 30: not quite useless
533 * 29..0: usage counter
535 static inline u32 rt_score(struct rtable *rt)
537 u32 score = jiffies - rt->u.dst.lastuse;
539 score = ~score & ~(3<<30);
545 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
551 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
553 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
554 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
555 (fl1->mark ^ fl2->mark) |
556 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
557 *(u16 *)&fl2->nl_u.ip4_u.tos) |
558 (fl1->oif ^ fl2->oif) |
559 (fl1->iif ^ fl2->iif)) == 0;
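/*
 * compare_keys() above is a branch-free multi-field compare: each XOR
 * is zero for an equal field, and OR-ing the XORs together is zero only
 * when every field matched. The u16 load starting at fl4_tos
 * deliberately covers the adjacent scope byte as well.
 */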
562 static void rt_check_expire(struct work_struct *work)
564 static unsigned int rover;
565 unsigned int i = rover, goal;
566 struct rtable *rth, **rthp;
569 mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
570 if (ip_rt_gc_timeout > 1)
571 do_div(mult, ip_rt_gc_timeout);
572 goal = (unsigned int)mult;
573 if (goal > rt_hash_mask)
574 goal = rt_hash_mask + 1;
575 for (; goal > 0; goal--) {
576 unsigned long tmo = ip_rt_gc_timeout;
578 i = (i + 1) & rt_hash_mask;
579 rthp = &rt_hash_table[i].chain;
583 spin_lock_bh(rt_hash_lock_addr(i));
584 while ((rth = *rthp) != NULL) {
585 if (rth->u.dst.expires) {
586 /* Entry is expired even if it is in use */
587 if (time_before_eq(jiffies, rth->u.dst.expires)) {
589 rthp = &rth->u.dst.rt_next;
592 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
594 rthp = &rth->u.dst.rt_next;
598 /* Clean up aged-off entries. */
599 *rthp = rth->u.dst.rt_next;
602 spin_unlock_bh(rt_hash_lock_addr(i));
605 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
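/*
 * The scan above is deliberately incremental: each run handles "goal"
 * buckets, sized so that the whole table is covered about once per
 * ip_rt_gc_timeout, and the static "rover" remembers where the previous
 * run stopped. This keeps each work item bounded regardless of the
 * size of the hash table.
 */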
608 /* This can run from both BH and non-BH contexts, the latter
609 * in the case of a forced flush event.
611 static void rt_run_flush(unsigned long dummy)
614 struct rtable *rth, *next;
618 get_random_bytes(&rt_hash_rnd, 4);
620 for (i = rt_hash_mask; i >= 0; i--) {
621 spin_lock_bh(rt_hash_lock_addr(i));
622 rth = rt_hash_table[i].chain;
624 rt_hash_table[i].chain = NULL;
625 spin_unlock_bh(rt_hash_lock_addr(i));
627 for (; rth; rth = next) {
628 next = rth->u.dst.rt_next;
634 static DEFINE_SPINLOCK(rt_flush_lock);
636 void rt_cache_flush(int delay)
638 unsigned long now = jiffies;
639 int user_mode = !in_softirq();
642 delay = ip_rt_min_delay;
644 spin_lock_bh(&rt_flush_lock);
646 if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
647 long tmo = (long)(rt_deadline - now);
649 /* If the flush timer is already running
650 and the flush request is not immediate (delay > 0):
652 if the deadline has not been reached yet, prolong the timer to "delay",
653 otherwise fire it at the deadline.
656 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
664 spin_unlock_bh(&rt_flush_lock);
669 if (rt_deadline == 0)
670 rt_deadline = now + ip_rt_max_delay;
672 mod_timer(&rt_flush_timer, now+delay);
673 spin_unlock_bh(&rt_flush_lock);
676 static void rt_secret_rebuild(unsigned long dummy)
678 unsigned long now = jiffies;
681 mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
685 Short description of GC goals.
687 We want to build an algorithm which keeps the routing cache
688 at some equilibrium point, where the number of aged-off entries
689 stays approximately equal to the number of newly generated ones.
691 The current expiration strength is the variable "expire".
692 We try to adjust it dynamically, so that when the network
693 is idle "expire" is large enough to keep enough warm entries,
694 and when the load increases it shrinks to limit the cache size.
697 static int rt_garbage_collect(void)
699 static unsigned long expire = RT_GC_TIMEOUT;
700 static unsigned long last_gc;
702 static int equilibrium;
703 struct rtable *rth, **rthp;
704 unsigned long now = jiffies;
708 * Garbage collection is pretty expensive,
709 * do not run it too frequently.
712 RT_CACHE_STAT_INC(gc_total);
714 if (now - last_gc < ip_rt_gc_min_interval &&
715 atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
716 RT_CACHE_STAT_INC(gc_ignored);
720 /* Calculate the number of entries we want to expire now. */
721 goal = atomic_read(&ipv4_dst_ops.entries) -
722 (ip_rt_gc_elasticity << rt_hash_log);
724 if (equilibrium < ipv4_dst_ops.gc_thresh)
725 equilibrium = ipv4_dst_ops.gc_thresh;
726 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
728 equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
729 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
732 /* We are in a dangerous area. Try to reduce the cache really
735 goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
736 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
739 if (now - last_gc >= ip_rt_gc_min_interval)
750 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
751 unsigned long tmo = expire;
753 k = (k + 1) & rt_hash_mask;
754 rthp = &rt_hash_table[k].chain;
755 spin_lock_bh(rt_hash_lock_addr(k));
756 while ((rth = *rthp) != NULL) {
757 if (!rt_may_expire(rth, tmo, expire)) {
759 rthp = &rth->u.dst.rt_next;
762 *rthp = rth->u.dst.rt_next;
766 spin_unlock_bh(rt_hash_lock_addr(k));
775 /* The goal was not achieved. We stop the process when:
777 - expire has been reduced to zero (otherwise expire is halved).
778 - the table is not full.
779 - we are called from interrupt context.
780 - the jiffies check is just a fallback/debug loop breaker;
781 we will not spin here for a long time in any case.
784 RT_CACHE_STAT_INC(gc_goal_miss);
790 #if RT_CACHE_DEBUG >= 2
791 printk(KERN_DEBUG "expire>> %lu %d %d %d\n", expire,
792 atomic_read(&ipv4_dst_ops.entries), goal, i);
795 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
797 } while (!in_softirq() && time_before_eq(jiffies, now));
799 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
802 printk(KERN_WARNING "dst cache overflow\n");
803 RT_CACHE_STAT_INC(gc_dst_overflow);
807 expire += ip_rt_gc_min_interval;
808 if (expire > ip_rt_gc_timeout ||
809 atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
810 expire = ip_rt_gc_timeout;
811 #if RT_CACHE_DEBUG >= 2
812 printk(KERN_DEBUG "expire++ %lu %d %d %d\n", expire,
813 atomic_read(&ipv4_dst_ops.entries), goal, rover);
818 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
820 struct rtable *rth, **rthp;
822 struct rtable *cand, **candp;
825 int attempts = !in_softirq();
834 rthp = &rt_hash_table[hash].chain;
836 spin_lock_bh(rt_hash_lock_addr(hash));
837 while ((rth = *rthp) != NULL) {
838 if (compare_keys(&rth->fl, &rt->fl)) {
840 *rthp = rth->u.dst.rt_next;
842 * Since lookup is lockfree, the deletion
843 * must be visible to another weakly ordered CPU before
844 * the insertion at the start of the hash chain.
846 rcu_assign_pointer(rth->u.dst.rt_next,
847 rt_hash_table[hash].chain);
849 * Since lookup is lockfree, the update writes
850 * must be ordered for consistency on SMP.
852 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
855 dst_hold(&rth->u.dst);
856 rth->u.dst.lastuse = now;
857 spin_unlock_bh(rt_hash_lock_addr(hash));
864 if (!atomic_read(&rth->u.dst.__refcnt)) {
865 u32 score = rt_score(rth);
867 if (score <= min_score) {
876 rthp = &rth->u.dst.rt_next;
880 /* ip_rt_gc_elasticity used to be the average chain length;
881 * when it is exceeded, gc becomes really aggressive.
883 * The second limit is less certain. At the moment it allows
884 * only 2 entries per bucket. We will see.
886 if (chain_length > ip_rt_gc_elasticity) {
887 *candp = cand->u.dst.rt_next;
892 /* Try to bind the route to an arp entry only if it is an output
893 route or a unicast forwarding path.
895 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
896 int err = arp_bind_neighbour(&rt->u.dst);
898 spin_unlock_bh(rt_hash_lock_addr(hash));
900 if (err != -ENOBUFS) {
905 /* The neighbour tables are full and nothing
906 can be released. Try to shrink the route cache;
907 most likely it holds some neighbour records.
909 if (attempts-- > 0) {
910 int saved_elasticity = ip_rt_gc_elasticity;
911 int saved_int = ip_rt_gc_min_interval;
912 ip_rt_gc_elasticity = 1;
913 ip_rt_gc_min_interval = 0;
914 rt_garbage_collect();
915 ip_rt_gc_min_interval = saved_int;
916 ip_rt_gc_elasticity = saved_elasticity;
921 printk(KERN_WARNING "Neighbour table overflow.\n");
927 rt->u.dst.rt_next = rt_hash_table[hash].chain;
928 #if RT_CACHE_DEBUG >= 2
929 if (rt->u.dst.rt_next) {
931 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
932 NIPQUAD(rt->rt_dst));
933 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
934 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
938 rt_hash_table[hash].chain = rt;
939 spin_unlock_bh(rt_hash_lock_addr(hash));
944 void rt_bind_peer(struct rtable *rt, int create)
946 static DEFINE_SPINLOCK(rt_peer_lock);
947 struct inet_peer *peer;
949 peer = inet_getpeer(rt->rt_dst, create);
951 spin_lock_bh(&rt_peer_lock);
952 if (rt->peer == NULL) {
956 spin_unlock_bh(&rt_peer_lock);
962 * Peer allocation may fail only in serious out-of-memory conditions. However,
963 * we can still generate some output.
964 * Random ID selection looks a bit dangerous because we have no chance of
965 * selecting an ID that is unique within a reasonable period of time.
966 * But a broken packet identifier may be better than no packet at all.
968 static void ip_select_fb_ident(struct iphdr *iph)
970 static DEFINE_SPINLOCK(ip_fb_id_lock);
971 static u32 ip_fallback_id;
974 spin_lock_bh(&ip_fb_id_lock);
975 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
976 iph->id = htons(salt & 0xFFFF);
977 ip_fallback_id = salt;
978 spin_unlock_bh(&ip_fb_id_lock);
981 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
983 struct rtable *rt = (struct rtable *) dst;
986 if (rt->peer == NULL)
989 /* If a peer is attached to the destination, it is never detached,
990 so we need not grab a lock to dereference it.
993 iph->id = htons(inet_getid(rt->peer, more));
997 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
998 __builtin_return_address(0));
1000 ip_select_fb_ident(iph);
1003 static void rt_del(unsigned hash, struct rtable *rt)
1005 struct rtable **rthp;
1007 spin_lock_bh(rt_hash_lock_addr(hash));
1009 for (rthp = &rt_hash_table[hash].chain; *rthp;
1010 rthp = &(*rthp)->u.dst.rt_next)
1012 *rthp = rt->u.dst.rt_next;
1016 spin_unlock_bh(rt_hash_lock_addr(hash));
1019 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1020 __be32 saddr, struct net_device *dev)
1023 struct in_device *in_dev = in_dev_get(dev);
1024 struct rtable *rth, **rthp;
1025 __be32 skeys[2] = { saddr, 0 };
1026 int ikeys[2] = { dev->ifindex, 0 };
1027 struct netevent_redirect netevent;
1032 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1033 || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
1034 goto reject_redirect;
1036 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1037 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1038 goto reject_redirect;
1039 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1040 goto reject_redirect;
1042 if (inet_addr_type(new_gw) != RTN_UNICAST)
1043 goto reject_redirect;
1046 for (i = 0; i < 2; i++) {
1047 for (k = 0; k < 2; k++) {
1048 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
1050 rthp=&rt_hash_table[hash].chain;
1053 while ((rth = rcu_dereference(*rthp)) != NULL) {
1056 if (rth->fl.fl4_dst != daddr ||
1057 rth->fl.fl4_src != skeys[i] ||
1058 rth->fl.oif != ikeys[k] ||
1060 rthp = &rth->u.dst.rt_next;
1064 if (rth->rt_dst != daddr ||
1065 rth->rt_src != saddr ||
1067 rth->rt_gateway != old_gw ||
1068 rth->u.dst.dev != dev)
1071 dst_hold(&rth->u.dst);
1074 rt = dst_alloc(&ipv4_dst_ops);
1081 /* Copy all the information. */
1083 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1084 rt->u.dst.__use = 1;
1085 atomic_set(&rt->u.dst.__refcnt, 1);
1086 rt->u.dst.child = NULL;
1088 dev_hold(rt->u.dst.dev);
1090 in_dev_hold(rt->idev);
1091 rt->u.dst.obsolete = 0;
1092 rt->u.dst.lastuse = jiffies;
1093 rt->u.dst.path = &rt->u.dst;
1094 rt->u.dst.neighbour = NULL;
1095 rt->u.dst.hh = NULL;
1096 rt->u.dst.xfrm = NULL;
1098 rt->rt_flags |= RTCF_REDIRECTED;
1100 /* Gateway is different ... */
1101 rt->rt_gateway = new_gw;
1103 /* Redirect received -> path was valid */
1104 dst_confirm(&rth->u.dst);
1107 atomic_inc(&rt->peer->refcnt);
1109 if (arp_bind_neighbour(&rt->u.dst) ||
1110 !(rt->u.dst.neighbour->nud_state &
1112 if (rt->u.dst.neighbour)
1113 neigh_event_send(rt->u.dst.neighbour, NULL);
1119 netevent.old = &rth->u.dst;
1120 netevent.new = &rt->u.dst;
1121 call_netevent_notifiers(NETEVENT_REDIRECT,
1125 if (!rt_intern_hash(hash, rt, &rt))
1138 #ifdef CONFIG_IP_ROUTE_VERBOSE
1139 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1140 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
1141 "%u.%u.%u.%u ignored.\n"
1142 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
1143 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1144 NIPQUAD(saddr), NIPQUAD(daddr));
1149 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1151 struct rtable *rt = (struct rtable*)dst;
1152 struct dst_entry *ret = dst;
1155 if (dst->obsolete) {
1158 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1159 rt->u.dst.expires) {
1160 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1162 #if RT_CACHE_DEBUG >= 1
1163 printk(KERN_DEBUG "ip_rt_advice: redirect to "
1164 "%u.%u.%u.%u/%02x dropped\n",
1165 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
1176 * 1. The first ip_rt_redirect_number redirects are sent
1177 * with exponential backoff, then we stop sending them at all,
1178 * assuming that the host ignores our redirects.
1179 * 2. If we did not see packets requiring redirects
1180 * during ip_rt_redirect_silence, we assume that the host
1181 * forgot the redirected route and start to send redirects again.
1183 * This algorithm is much cheaper and more intelligent than dumb load limiting
1186 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1187 * and "frag. need" (breaks PMTU discovery) in icmp.c.
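/*
 * A worked example with the defaults above, assuming HZ=1000: the first
 * redirect goes out immediately (rate_tokens == 0); after the k-th one,
 * the next is delayed by ip_rt_redirect_load << k jiffies, i.e. 40 ms,
 * 80 ms, 160 ms, ... Once ip_rt_redirect_number (9) redirects have been
 * ignored we go silent, until a triggering packet arrives more than
 * ip_rt_redirect_silence (~20.5 s) after the last one, which resets
 * rate_tokens to zero.
 */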
1190 void ip_rt_send_redirect(struct sk_buff *skb)
1192 struct rtable *rt = (struct rtable*)skb->dst;
1193 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1198 if (!IN_DEV_TX_REDIRECTS(in_dev))
1201 /* No redirected packets during ip_rt_redirect_silence;
1202 * reset the algorithm.
1204 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1205 rt->u.dst.rate_tokens = 0;
1207 /* Too many ignored redirects; do not send anything and
1208 * set u.dst.rate_last to the last seen redirected packet.
1210 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1211 rt->u.dst.rate_last = jiffies;
1215 /* Check for load limit; set rate_last to the latest sent
1218 if (rt->u.dst.rate_tokens == 0 ||
1220 (rt->u.dst.rate_last +
1221 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1222 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1223 rt->u.dst.rate_last = jiffies;
1224 ++rt->u.dst.rate_tokens;
1225 #ifdef CONFIG_IP_ROUTE_VERBOSE
1226 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1227 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1229 printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
1230 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
1231 NIPQUAD(rt->rt_src), rt->rt_iif,
1232 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1239 static int ip_error(struct sk_buff *skb)
1241 struct rtable *rt = (struct rtable*)skb->dst;
1245 switch (rt->u.dst.error) {
1250 code = ICMP_HOST_UNREACH;
1253 code = ICMP_NET_UNREACH;
1256 code = ICMP_PKT_FILTERED;
1261 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1262 if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1263 rt->u.dst.rate_tokens = ip_rt_error_burst;
1264 rt->u.dst.rate_last = now;
1265 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1266 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1267 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1270 out: kfree_skb(skb);
1275 * The last two values are not from the RFC but
1276 * are needed for AMPRnet AX.25 paths.
1279 static const unsigned short mtu_plateau[] =
1280 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1282 static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
1286 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1287 if (old_mtu > mtu_plateau[i])
1288 return mtu_plateau[i];
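/*
 * This is the RFC 1191 plateau-table search: return the first plateau
 * strictly below old_mtu. For example, an old_mtu of 1500 guesses 1492
 * (the classic PPPoE MTU), and 1492 in turn guesses 576; values at or
 * below the smallest plateau fall through to the minimum IPv4 MTU of 68.
 */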
1292 unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1295 unsigned short old_mtu = ntohs(iph->tot_len);
1297 __be32 skeys[2] = { iph->saddr, 0, };
1298 __be32 daddr = iph->daddr;
1299 unsigned short est_mtu = 0;
1301 if (ipv4_config.no_pmtu_disc)
1304 for (i = 0; i < 2; i++) {
1305 unsigned hash = rt_hash(daddr, skeys[i], 0);
1308 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1309 rth = rcu_dereference(rth->u.dst.rt_next)) {
1310 if (rth->fl.fl4_dst == daddr &&
1311 rth->fl.fl4_src == skeys[i] &&
1312 rth->rt_dst == daddr &&
1313 rth->rt_src == iph->saddr &&
1315 !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
1316 unsigned short mtu = new_mtu;
1318 if (new_mtu < 68 || new_mtu >= old_mtu) {
1320 /* BSD 4.2 compatibility hack :-( */
1322 old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
1323 old_mtu >= 68 + (iph->ihl << 2))
1324 old_mtu -= iph->ihl << 2;
1326 mtu = guess_mtu(old_mtu);
1328 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
1329 if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
1330 dst_confirm(&rth->u.dst);
1331 if (mtu < ip_rt_min_pmtu) {
1332 mtu = ip_rt_min_pmtu;
1333 rth->u.dst.metrics[RTAX_LOCK-1] |=
1336 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1337 dst_set_expires(&rth->u.dst,
1346 return est_mtu ? : new_mtu;
1349 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1351 if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
1352 !(dst_metric_locked(dst, RTAX_MTU))) {
1353 if (mtu < ip_rt_min_pmtu) {
1354 mtu = ip_rt_min_pmtu;
1355 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1357 dst->metrics[RTAX_MTU-1] = mtu;
1358 dst_set_expires(dst, ip_rt_mtu_expires);
1359 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1363 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1368 static void ipv4_dst_destroy(struct dst_entry *dst)
1370 struct rtable *rt = (struct rtable *) dst;
1371 struct inet_peer *peer = rt->peer;
1372 struct in_device *idev = rt->idev;
1385 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1388 struct rtable *rt = (struct rtable *) dst;
1389 struct in_device *idev = rt->idev;
1390 if (dev != init_net.loopback_dev && idev && idev->dev == dev) {
1391 struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev);
1392 if (loopback_idev) {
1393 rt->idev = loopback_idev;
1399 static void ipv4_link_failure(struct sk_buff *skb)
1403 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1405 rt = (struct rtable *) skb->dst;
1407 dst_set_expires(&rt->u.dst, 0);
1410 static int ip_rt_bug(struct sk_buff *skb)
1412 printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
1413 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1414 skb->dev ? skb->dev->name : "?");
1420 We do not cache the source address of the outgoing interface,
1421 because it is used only by the IP RR, TS and SRR options,
1422 so it is out of the fast path.
1424 BTW remember: "addr" is allowed to be unaligned
1428 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1431 struct fib_result res;
1433 if (rt->fl.iif == 0)
1435 else if (fib_lookup(&rt->fl, &res) == 0) {
1436 src = FIB_RES_PREFSRC(res);
1439 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1441 memcpy(addr, &src, 4);
1444 #ifdef CONFIG_NET_CLS_ROUTE
1445 static void set_class_tag(struct rtable *rt, u32 tag)
1447 if (!(rt->u.dst.tclassid & 0xFFFF))
1448 rt->u.dst.tclassid |= tag & 0xFFFF;
1449 if (!(rt->u.dst.tclassid & 0xFFFF0000))
1450 rt->u.dst.tclassid |= tag & 0xFFFF0000;
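/*
 * dst.tclassid packs two 16-bit routing realms, one in each half of the
 * word. set_class_tag() only fills a half that is still zero, so a tag
 * assigned earlier (from a more specific source) is never overwritten
 * by the later, more generic calls in rt_set_nexthop().
 */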
1454 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1456 struct fib_info *fi = res->fi;
1459 if (FIB_RES_GW(*res) &&
1460 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1461 rt->rt_gateway = FIB_RES_GW(*res);
1462 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1463 sizeof(rt->u.dst.metrics));
1464 if (fi->fib_mtu == 0) {
1465 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1466 if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
1467 rt->rt_gateway != rt->rt_dst &&
1468 rt->u.dst.dev->mtu > 576)
1469 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1471 #ifdef CONFIG_NET_CLS_ROUTE
1472 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1475 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1477 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1478 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1479 if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
1480 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1481 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
1482 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1484 if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
1485 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1487 #ifdef CONFIG_NET_CLS_ROUTE
1488 #ifdef CONFIG_IP_MULTIPLE_TABLES
1489 set_class_tag(rt, fib_rules_tclass(res));
1491 set_class_tag(rt, itag);
1493 rt->rt_type = res->type;
1496 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1497 u8 tos, struct net_device *dev, int our)
1502 struct in_device *in_dev = in_dev_get(dev);
1505 /* Primary sanity checks. */
1510 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
1511 skb->protocol != htons(ETH_P_IP))
1514 if (ZERONET(saddr)) {
1515 if (!LOCAL_MCAST(daddr))
1517 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1518 } else if (fib_validate_source(saddr, 0, tos, 0,
1519 dev, &spec_dst, &itag) < 0)
1522 rth = dst_alloc(&ipv4_dst_ops);
1526 rth->u.dst.output= ip_rt_bug;
1528 atomic_set(&rth->u.dst.__refcnt, 1);
1529 rth->u.dst.flags= DST_HOST;
1530 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1531 rth->u.dst.flags |= DST_NOPOLICY;
1532 rth->fl.fl4_dst = daddr;
1533 rth->rt_dst = daddr;
1534 rth->fl.fl4_tos = tos;
1535 rth->fl.mark = skb->mark;
1536 rth->fl.fl4_src = saddr;
1537 rth->rt_src = saddr;
1538 #ifdef CONFIG_NET_CLS_ROUTE
1539 rth->u.dst.tclassid = itag;
1542 rth->fl.iif = dev->ifindex;
1543 rth->u.dst.dev = init_net.loopback_dev;
1544 dev_hold(rth->u.dst.dev);
1545 rth->idev = in_dev_get(rth->u.dst.dev);
1547 rth->rt_gateway = daddr;
1548 rth->rt_spec_dst= spec_dst;
1549 rth->rt_type = RTN_MULTICAST;
1550 rth->rt_flags = RTCF_MULTICAST;
1552 rth->u.dst.input= ip_local_deliver;
1553 rth->rt_flags |= RTCF_LOCAL;
1556 #ifdef CONFIG_IP_MROUTE
1557 if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
1558 rth->u.dst.input = ip_mr_input;
1560 RT_CACHE_STAT_INC(in_slow_mc);
1563 hash = rt_hash(daddr, saddr, dev->ifindex);
1564 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1576 static void ip_handle_martian_source(struct net_device *dev,
1577 struct in_device *in_dev,
1578 struct sk_buff *skb,
1582 RT_CACHE_STAT_INC(in_martian_src);
1583 #ifdef CONFIG_IP_ROUTE_VERBOSE
1584 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1586 * RFC1812 recommendation: if the source is martian,
1587 * the only hint is the MAC header.
1589 printk(KERN_WARNING "martian source %u.%u.%u.%u from "
1590 "%u.%u.%u.%u, on dev %s\n",
1591 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1592 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1594 const unsigned char *p = skb_mac_header(skb);
1595 printk(KERN_WARNING "ll header: ");
1596 for (i = 0; i < dev->hard_header_len; i++, p++) {
1598 if (i < (dev->hard_header_len - 1))
1607 static inline int __mkroute_input(struct sk_buff *skb,
1608 struct fib_result* res,
1609 struct in_device *in_dev,
1610 __be32 daddr, __be32 saddr, u32 tos,
1611 struct rtable **result)
1616 struct in_device *out_dev;
1621 /* get a working reference to the output device */
1622 out_dev = in_dev_get(FIB_RES_DEV(*res));
1623 if (out_dev == NULL) {
1624 if (net_ratelimit())
1625 printk(KERN_CRIT "Bug in ip_route_input" \
1626 "_slow(). Please report.\n");
1631 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1632 in_dev->dev, &spec_dst, &itag);
1634 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1642 flags |= RTCF_DIRECTSRC;
1644 if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
1645 (IN_DEV_SHARED_MEDIA(out_dev) ||
1646 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1647 flags |= RTCF_DOREDIRECT;
1649 if (skb->protocol != htons(ETH_P_IP)) {
1650 /* Not IP (i.e. ARP). Do not create a route if it is
1651 * invalid for proxy arp. DNAT routes are always valid.
1653 if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
1660 rth = dst_alloc(&ipv4_dst_ops);
1666 atomic_set(&rth->u.dst.__refcnt, 1);
1667 rth->u.dst.flags= DST_HOST;
1668 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1669 rth->u.dst.flags |= DST_NOPOLICY;
1670 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1671 rth->u.dst.flags |= DST_NOXFRM;
1672 rth->fl.fl4_dst = daddr;
1673 rth->rt_dst = daddr;
1674 rth->fl.fl4_tos = tos;
1675 rth->fl.mark = skb->mark;
1676 rth->fl.fl4_src = saddr;
1677 rth->rt_src = saddr;
1678 rth->rt_gateway = daddr;
1680 rth->fl.iif = in_dev->dev->ifindex;
1681 rth->u.dst.dev = (out_dev)->dev;
1682 dev_hold(rth->u.dst.dev);
1683 rth->idev = in_dev_get(rth->u.dst.dev);
1685 rth->rt_spec_dst= spec_dst;
1687 rth->u.dst.input = ip_forward;
1688 rth->u.dst.output = ip_output;
1690 rt_set_nexthop(rth, res, itag);
1692 rth->rt_flags = flags;
1697 /* release the working reference to the output device */
1698 in_dev_put(out_dev);
1702 static inline int ip_mkroute_input(struct sk_buff *skb,
1703 struct fib_result* res,
1704 const struct flowi *fl,
1705 struct in_device *in_dev,
1706 __be32 daddr, __be32 saddr, u32 tos)
1708 struct rtable* rth = NULL;
1712 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1713 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
1714 fib_select_multipath(fl, res);
1717 /* create a routing cache entry */
1718 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1722 /* put it into the cache */
1723 hash = rt_hash(daddr, saddr, fl->iif);
1724 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1728 * NOTE. We drop all packets that have a local source
1729 * address, because every properly looped back packet
1730 * must already have the correct destination attached by the output routine.
1732 * This approach solves two big problems:
1733 * 1. Non-simplex devices are handled properly.
1734 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1737 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1738 u8 tos, struct net_device *dev)
1740 struct fib_result res;
1741 struct in_device *in_dev = in_dev_get(dev);
1742 struct flowi fl = { .nl_u = { .ip4_u =
1746 .scope = RT_SCOPE_UNIVERSE,
1749 .iif = dev->ifindex };
1752 struct rtable * rth;
1758 /* IP on this device is disabled. */
1763 /* Check for the most weird martians, which cannot be detected
1767 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
1768 goto martian_source;
1770 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
1773 /* Accept zero addresses only for limited broadcast;
1774 * I do not even know whether to fix this or not. Waiting for complaints :-)
1777 goto martian_source;
1779 if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
1780 goto martian_destination;
1783 * Now we are ready to route the packet.
1785 if ((err = fib_lookup(&fl, &res)) != 0) {
1786 if (!IN_DEV_FORWARD(in_dev))
1792 RT_CACHE_STAT_INC(in_slow_tot);
1794 if (res.type == RTN_BROADCAST)
1797 if (res.type == RTN_LOCAL) {
1799 result = fib_validate_source(saddr, daddr, tos,
1800 init_net.loopback_dev->ifindex,
1801 dev, &spec_dst, &itag);
1803 goto martian_source;
1805 flags |= RTCF_DIRECTSRC;
1810 if (!IN_DEV_FORWARD(in_dev))
1812 if (res.type != RTN_UNICAST)
1813 goto martian_destination;
1815 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1823 if (skb->protocol != htons(ETH_P_IP))
1827 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1829 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1832 goto martian_source;
1834 flags |= RTCF_DIRECTSRC;
1836 flags |= RTCF_BROADCAST;
1837 res.type = RTN_BROADCAST;
1838 RT_CACHE_STAT_INC(in_brd);
1841 rth = dst_alloc(&ipv4_dst_ops);
1845 rth->u.dst.output= ip_rt_bug;
1847 atomic_set(&rth->u.dst.__refcnt, 1);
1848 rth->u.dst.flags= DST_HOST;
1849 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1850 rth->u.dst.flags |= DST_NOPOLICY;
1851 rth->fl.fl4_dst = daddr;
1852 rth->rt_dst = daddr;
1853 rth->fl.fl4_tos = tos;
1854 rth->fl.mark = skb->mark;
1855 rth->fl.fl4_src = saddr;
1856 rth->rt_src = saddr;
1857 #ifdef CONFIG_NET_CLS_ROUTE
1858 rth->u.dst.tclassid = itag;
1861 rth->fl.iif = dev->ifindex;
1862 rth->u.dst.dev = init_net.loopback_dev;
1863 dev_hold(rth->u.dst.dev);
1864 rth->idev = in_dev_get(rth->u.dst.dev);
1865 rth->rt_gateway = daddr;
1866 rth->rt_spec_dst= spec_dst;
1867 rth->u.dst.input= ip_local_deliver;
1868 rth->rt_flags = flags|RTCF_LOCAL;
1869 if (res.type == RTN_UNREACHABLE) {
1870 rth->u.dst.input= ip_error;
1871 rth->u.dst.error= -err;
1872 rth->rt_flags &= ~RTCF_LOCAL;
1874 rth->rt_type = res.type;
1875 hash = rt_hash(daddr, saddr, fl.iif);
1876 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1880 RT_CACHE_STAT_INC(in_no_route);
1881 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1882 res.type = RTN_UNREACHABLE;
1886 * Do not cache martian addresses: they should be logged (RFC1812)
1888 martian_destination:
1889 RT_CACHE_STAT_INC(in_martian_dst);
1890 #ifdef CONFIG_IP_ROUTE_VERBOSE
1891 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1892 printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
1893 "%u.%u.%u.%u, dev %s\n",
1894 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1898 err = -EHOSTUNREACH;
1910 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1914 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1915 u8 tos, struct net_device *dev)
1917 struct rtable * rth;
1919 int iif = dev->ifindex;
1921 tos &= IPTOS_RT_MASK;
1922 hash = rt_hash(daddr, saddr, iif);
1925 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1926 rth = rcu_dereference(rth->u.dst.rt_next)) {
1927 if (rth->fl.fl4_dst == daddr &&
1928 rth->fl.fl4_src == saddr &&
1929 rth->fl.iif == iif &&
1931 rth->fl.mark == skb->mark &&
1932 rth->fl.fl4_tos == tos) {
1933 rth->u.dst.lastuse = jiffies;
1934 dst_hold(&rth->u.dst);
1936 RT_CACHE_STAT_INC(in_hit);
1938 skb->dst = (struct dst_entry*)rth;
1941 RT_CACHE_STAT_INC(in_hlist_search);
1945 /* Multicast recognition logic was moved from the route cache to here.
1946 The problem was that too many Ethernet cards have broken/missing
1947 hardware multicast filters :-( As a result, a host on a multicast
1948 network acquires a lot of useless route cache entries, e.g. from
1949 SDR messages from all over the world. Now we try to get rid of them.
1950 Really, provided the software IP multicast filter is organized
1951 reasonably (at least, hashed), it does not result in a slowdown
1952 compared with route cache reject entries.
1953 Note that multicast routers are not affected, because
1954 a route cache entry is created eventually.
1956 if (MULTICAST(daddr)) {
1957 struct in_device *in_dev;
1960 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
1961 int our = ip_check_mc(in_dev, daddr, saddr,
1962 ip_hdr(skb)->protocol);
1964 #ifdef CONFIG_IP_MROUTE
1965 || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
1969 return ip_route_input_mc(skb, daddr, saddr,
1976 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
1979 static inline int __mkroute_output(struct rtable **result,
1980 struct fib_result* res,
1981 const struct flowi *fl,
1982 const struct flowi *oldflp,
1983 struct net_device *dev_out,
1987 struct in_device *in_dev;
1988 u32 tos = RT_FL_TOS(oldflp);
1991 if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
1994 if (fl->fl4_dst == htonl(0xFFFFFFFF))
1995 res->type = RTN_BROADCAST;
1996 else if (MULTICAST(fl->fl4_dst))
1997 res->type = RTN_MULTICAST;
1998 else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
2001 if (dev_out->flags & IFF_LOOPBACK)
2002 flags |= RTCF_LOCAL;
2004 /* get a working reference to the inet device */
2005 in_dev = in_dev_get(dev_out);
2009 if (res->type == RTN_BROADCAST) {
2010 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2012 fib_info_put(res->fi);
2015 } else if (res->type == RTN_MULTICAST) {
2016 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2017 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2019 flags &= ~RTCF_LOCAL;
2020 /* If a multicast route does not exist, use
2021 the default one, but do not gateway in this case.
2024 if (res->fi && res->prefixlen < 4) {
2025 fib_info_put(res->fi);
2031 rth = dst_alloc(&ipv4_dst_ops);
2037 atomic_set(&rth->u.dst.__refcnt, 1);
2038 rth->u.dst.flags= DST_HOST;
2039 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2040 rth->u.dst.flags |= DST_NOXFRM;
2041 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2042 rth->u.dst.flags |= DST_NOPOLICY;
2044 rth->fl.fl4_dst = oldflp->fl4_dst;
2045 rth->fl.fl4_tos = tos;
2046 rth->fl.fl4_src = oldflp->fl4_src;
2047 rth->fl.oif = oldflp->oif;
2048 rth->fl.mark = oldflp->mark;
2049 rth->rt_dst = fl->fl4_dst;
2050 rth->rt_src = fl->fl4_src;
2051 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2052 /* get references to the devices that are to be held by the routing
2054 rth->u.dst.dev = dev_out;
2056 rth->idev = in_dev_get(dev_out);
2057 rth->rt_gateway = fl->fl4_dst;
2058 rth->rt_spec_dst= fl->fl4_src;
2060 rth->u.dst.output=ip_output;
2062 RT_CACHE_STAT_INC(out_slow_tot);
2064 if (flags & RTCF_LOCAL) {
2065 rth->u.dst.input = ip_local_deliver;
2066 rth->rt_spec_dst = fl->fl4_dst;
2068 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2069 rth->rt_spec_dst = fl->fl4_src;
2070 if (flags & RTCF_LOCAL &&
2071 !(dev_out->flags & IFF_LOOPBACK)) {
2072 rth->u.dst.output = ip_mc_output;
2073 RT_CACHE_STAT_INC(out_slow_mc);
2075 #ifdef CONFIG_IP_MROUTE
2076 if (res->type == RTN_MULTICAST) {
2077 if (IN_DEV_MFORWARD(in_dev) &&
2078 !LOCAL_MCAST(oldflp->fl4_dst)) {
2079 rth->u.dst.input = ip_mr_input;
2080 rth->u.dst.output = ip_mc_output;
2086 rt_set_nexthop(rth, res, 0);
2088 rth->rt_flags = flags;
2092 /* release the working reference to the inet device */
2098 static inline int ip_mkroute_output(struct rtable **rp,
2099 struct fib_result* res,
2100 const struct flowi *fl,
2101 const struct flowi *oldflp,
2102 struct net_device *dev_out,
2105 struct rtable *rth = NULL;
2106 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2109 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2110 err = rt_intern_hash(hash, rth, rp);
2117 * Major route resolver routine.
2120 static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2122 u32 tos = RT_FL_TOS(oldflp);
2123 struct flowi fl = { .nl_u = { .ip4_u =
2124 { .daddr = oldflp->fl4_dst,
2125 .saddr = oldflp->fl4_src,
2126 .tos = tos & IPTOS_RT_MASK,
2127 .scope = ((tos & RTO_ONLINK) ?
2131 .mark = oldflp->mark,
2132 .iif = init_net.loopback_dev->ifindex,
2133 .oif = oldflp->oif };
2134 struct fib_result res;
2136 struct net_device *dev_out = NULL;
2142 #ifdef CONFIG_IP_MULTIPLE_TABLES
2146 if (oldflp->fl4_src) {
2148 if (MULTICAST(oldflp->fl4_src) ||
2149 BADCLASS(oldflp->fl4_src) ||
2150 ZERONET(oldflp->fl4_src))
2153 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2154 dev_out = ip_dev_find(oldflp->fl4_src);
2155 if (dev_out == NULL)
2158 /* I removed the check for oif == dev_out->oif here.
2159 It was wrong for two reasons:
2160 1. ip_dev_find(saddr) can return the wrong iface, if saddr is
2161 assigned to multiple interfaces.
2162 2. Moreover, we are allowed to send packets with a saddr
2163 of another iface. --ANK
2166 if (oldflp->oif == 0
2167 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2168 /* Special hack: the user can direct multicasts
2169 and limited broadcast via the necessary interface
2170 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2171 This hack is not just for fun, it allows
2172 vic, vat and friends to work.
2173 They bind a socket to loopback, set ttl to zero
2174 and expect that it will work.
2175 From the viewpoint of the routing cache they are broken,
2176 because we are not allowed to build a multicast path
2177 with a loopback source addr (look, the routing cache
2178 cannot know that ttl is zero, so the packet
2179 will not leave this host and the route is valid).
2180 Luckily, this hack is a good workaround.
2183 fl.oif = dev_out->ifindex;
2193 dev_out = dev_get_by_index(&init_net, oldflp->oif);
2195 if (dev_out == NULL)
2198 /* RACE: Check return value of inet_select_addr instead. */
2199 if (__in_dev_get_rtnl(dev_out) == NULL) {
2201 goto out; /* Wrong error code */
2204 if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2206 fl.fl4_src = inet_select_addr(dev_out, 0,
2211 if (MULTICAST(oldflp->fl4_dst))
2212 fl.fl4_src = inet_select_addr(dev_out, 0,
2214 else if (!oldflp->fl4_dst)
2215 fl.fl4_src = inet_select_addr(dev_out, 0,
2221 fl.fl4_dst = fl.fl4_src;
2223 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2226 dev_out = init_net.loopback_dev;
2228 fl.oif = init_net.loopback_dev->ifindex;
2229 res.type = RTN_LOCAL;
2230 flags |= RTCF_LOCAL;
2234 if (fib_lookup(&fl, &res)) {
2237 /* Apparently, the routing tables are wrong. Assume
2238 that the destination is on-link.
2241 Because we are allowed to send to an iface
2242 even if it has NO routes and NO assigned
2243 addresses. When oif is specified, the routing
2244 tables are looked up with only one purpose:
2245 to catch whether the destination is gatewayed rather than
2246 direct. Moreover, if MSG_DONTROUTE is set,
2247 we send the packet, ignoring both the routing tables
2248 and the ifaddr state. --ANK
2251 We could do this even when oif is unknown
2252 (IPv6 likely does), but we do not.
2255 if (fl.fl4_src == 0)
2256 fl.fl4_src = inet_select_addr(dev_out, 0,
2258 res.type = RTN_UNICAST;
2268 if (res.type == RTN_LOCAL) {
2270 fl.fl4_src = fl.fl4_dst;
2273 dev_out = init_net.loopback_dev;
2275 fl.oif = dev_out->ifindex;
2277 fib_info_put(res.fi);
2279 flags |= RTCF_LOCAL;
2283 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2284 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2285 fib_select_multipath(&fl, &res);
2288 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2289 fib_select_default(&fl, &res);
2292 fl.fl4_src = FIB_RES_PREFSRC(res);
2296 dev_out = FIB_RES_DEV(res);
2298 fl.oif = dev_out->ifindex;
2302 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2312 int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2317 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
2320 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2321 rth = rcu_dereference(rth->u.dst.rt_next)) {
2322 if (rth->fl.fl4_dst == flp->fl4_dst &&
2323 rth->fl.fl4_src == flp->fl4_src &&
2325 rth->fl.oif == flp->oif &&
2326 rth->fl.mark == flp->mark &&
2327 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2328 (IPTOS_RT_MASK | RTO_ONLINK))) {
2329 rth->u.dst.lastuse = jiffies;
2330 dst_hold(&rth->u.dst);
2332 RT_CACHE_STAT_INC(out_hit);
2333 rcu_read_unlock_bh();
2337 RT_CACHE_STAT_INC(out_hlist_search);
2339 rcu_read_unlock_bh();
2341 return ip_route_output_slow(rp, flp);
2344 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2346 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2350 static struct dst_ops ipv4_dst_blackhole_ops = {
2352 .protocol = __constant_htons(ETH_P_IP),
2353 .destroy = ipv4_dst_destroy,
2354 .check = ipv4_dst_check,
2355 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2356 .entry_size = sizeof(struct rtable),
2360 static int ipv4_blackhole_output(struct sk_buff *skb)
2366 static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
2368 struct rtable *ort = *rp;
2369 struct rtable *rt = (struct rtable *)
2370 dst_alloc(&ipv4_dst_blackhole_ops);
2373 struct dst_entry *new = &rt->u.dst;
2375 atomic_set(&new->__refcnt, 1);
2377 new->input = ipv4_blackhole_output;
2378 new->output = ipv4_blackhole_output;
2379 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2381 new->dev = ort->u.dst.dev;
2387 rt->idev = ort->idev;
2389 in_dev_hold(rt->idev);
2390 rt->rt_flags = ort->rt_flags;
2391 rt->rt_type = ort->rt_type;
2392 rt->rt_dst = ort->rt_dst;
2393 rt->rt_src = ort->rt_src;
2394 rt->rt_iif = ort->rt_iif;
2395 rt->rt_gateway = ort->rt_gateway;
2396 rt->rt_spec_dst = ort->rt_spec_dst;
2397 rt->peer = ort->peer;
2399 atomic_inc(&rt->peer->refcnt);
2404 dst_release(&(*rp)->u.dst);
2406 return (rt ? 0 : -ENOMEM);
2409 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2413 if ((err = __ip_route_output_key(rp, flp)) != 0)
2418 flp->fl4_src = (*rp)->rt_src;
2420 flp->fl4_dst = (*rp)->rt_dst;
2421 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
2422 if (err == -EREMOTE)
2423 err = ipv4_dst_blackhole(rp, flp, sk);
2431 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2433 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2435 return ip_route_output_flow(rp, flp, NULL, 0);
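/*
 * A minimal caller-side sketch (a hypothetical helper, not part of this
 * file's API): resolve an output route for daddr with the wrapper above
 * and drop the reference when done.
 */
#if 0
static int example_output_route(__be32 daddr)
{
	struct flowi fl = {
		.nl_u = { .ip4_u = { .daddr = daddr } },
	};
	struct rtable *rt;
	int err = ip_route_output_key(&rt, &fl);

	if (err)
		return err;	/* e.g. -ENETUNREACH */
	/* ... transmit via rt->u.dst ... */
	ip_rt_put(rt);	/* release the reference taken by the lookup */
	return 0;
}
#endif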
2438 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2439 int nowait, unsigned int flags)
2441 struct rtable *rt = (struct rtable*)skb->dst;
2443 struct nlmsghdr *nlh;
2445 u32 id = 0, ts = 0, tsage = 0, error;
2447 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2451 r = nlmsg_data(nlh);
2452 r->rtm_family = AF_INET;
2453 r->rtm_dst_len = 32;
2455 r->rtm_tos = rt->fl.fl4_tos;
2456 r->rtm_table = RT_TABLE_MAIN;
2457 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2458 r->rtm_type = rt->rt_type;
2459 r->rtm_scope = RT_SCOPE_UNIVERSE;
2460 r->rtm_protocol = RTPROT_UNSPEC;
2461 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2462 if (rt->rt_flags & RTCF_NOTIFY)
2463 r->rtm_flags |= RTM_F_NOTIFY;
2465 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2467 if (rt->fl.fl4_src) {
2468 r->rtm_src_len = 32;
2469 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2472 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
2473 #ifdef CONFIG_NET_CLS_ROUTE
2474 if (rt->u.dst.tclassid)
2475 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2478 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2479 else if (rt->rt_src != rt->fl.fl4_src)
2480 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2482 if (rt->rt_dst != rt->rt_gateway)
2483 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2485 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2486 goto nla_put_failure;
2488 error = rt->u.dst.error;
2489 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2491 id = rt->peer->ip_id_count;
2492 if (rt->peer->tcp_ts_stamp) {
2493 ts = rt->peer->tcp_ts;
2494 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2499 #ifdef CONFIG_IP_MROUTE
2500 __be32 dst = rt->rt_dst;
2502 if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
2503 IPV4_DEVCONF_ALL(MC_FORWARDING)) {
2504 int err = ipmr_get_route(skb, r, nowait);
2509 goto nla_put_failure;
2511 if (err == -EMSGSIZE)
2512 goto nla_put_failure;
2518 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2521 if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
2522 expires, error) < 0)
2523 goto nla_put_failure;
2525 return nlmsg_end(skb, nlh);
2528 nlmsg_cancel(skb, nlh);
2532 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2535 struct nlattr *tb[RTA_MAX+1];
2536 struct rtable *rt = NULL;
2541 struct sk_buff *skb;
2543 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2547 rtm = nlmsg_data(nlh);
2549 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2555 /* Reserve room for dummy headers; this skb can pass
2556 through a good chunk of the routing engine.
2558 skb_reset_mac_header(skb);
2559 skb_reset_network_header(skb);
2561 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2562 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2563 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2565 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2566 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2567 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2570 struct net_device *dev;
2572 dev = __dev_get_by_index(&init_net, iif);
2578 skb->protocol = htons(ETH_P_IP);
2581 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2584 rt = (struct rtable*) skb->dst;
2585 if (err == 0 && rt->u.dst.error)
2586 err = -rt->u.dst.error;
2593 .tos = rtm->rtm_tos,
2596 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2598 err = ip_route_output_key(&rt, &fl);
2604 skb->dst = &rt->u.dst;
2605 if (rtm->rtm_flags & RTM_F_NOTIFY)
2606 rt->rt_flags |= RTCF_NOTIFY;
2608 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2609 RTM_NEWROUTE, 0, 0);
2613 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
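/*
 * inet_rtm_getroute() backs RTM_GETROUTE requests, e.g. (illustrative,
 * from userspace):
 *
 *	$ ip route get 10.0.0.1
 *
 * With RTA_IIF set the request is resolved through ip_route_input() as
 * if a packet had arrived on that interface; otherwise it goes through
 * the output path via ip_route_output_key().
 */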
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for (h = 0; h <= rt_hash_mask; h++) {
		if (h < s_h) continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
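/*
 * A cache dump can span several netlink messages; cb->args[0]/[1]
 * record the hash chain and chain index where the previous pass
 * stopped, so the next invocation resumes there instead of rescanning
 * from the first bucket.
 */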
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(0);
}
#ifdef CONFIG_SYSCTL
static int flush_delay;

static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
				     struct file *filp, void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
		rt_cache_flush(flush_delay);
		return 0;
	}

	return -EINVAL;
}

static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
					      int __user *name,
					      int nlen,
					      void __user *oldval,
					      size_t __user *oldlenp,
					      void __user *newval,
					      size_t newlen)
{
	int delay;

	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	rt_cache_flush(delay);
	return 0;
}
ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.data		= &flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_DELAY,
		.procname	= "min_delay",
		.data		= &ip_rt_min_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_DELAY,
		.procname	= "max_delay",
		.data		= &ip_rt_max_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ .ctl_name = 0 }
};
#endif
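/*
 * These knobs appear under /proc/sys/net/ipv4/route/.  For example
 * (illustrative, from a root shell):
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * writes flush_delay and triggers rt_cache_flush() via the handler
 * above; the remaining entries tune cache garbage collection, ICMP
 * redirect and error rate limiting, and PMTU bounds.
 */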
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct;

/* This code sucks. But you should have seen it before! --RR */

/* IP route accounting ptr for this logical cpu number. */
#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)

#ifdef CONFIG_PROC_FS
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
		u32 *dst = (u32 *) buffer;

		/* Copy first cpu. */
		*start = buffer;
		memcpy(dst, src, length);

		/* Add the other cpus in, one int at a time */
		for_each_possible_cpu(i) {
			unsigned int j;

			/* cpu 0 is already in the buffer via the memcpy
			 * above; skip it here so it is not counted twice.
			 */
			if (i == 0)
				continue;

			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;

			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_NET_CLS_ROUTE */
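/*
 * /proc/net/rt_acct exposes the per-cpu accounting table as one flat
 * array of u32 counters: the reader seeds the buffer with cpu 0's
 * slice and then folds every other cpu's slice in word by word, so
 * userspace sees system-wide totals rather than per-cpu figures.
 */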
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
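/*
 * Illustrative usage (boot-time, not from this file): passing
 * "rhash_entries=65536" on the kernel command line overrides the
 * route cache hash size otherwise chosen by alloc_large_system_hash()
 * in ip_rt_init() below.
 */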
int __init ip_rt_init(void)
{
	int rc = 0;

	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
			     (jiffies ^ (jiffies >> 7)));

#ifdef CONFIG_NET_CLS_ROUTE
	{
	int order;
	for (order = 0;
	     (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
		/* NOTHING */;
	ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
	memset(ip_rt_acct, 0, PAGE_SIZE << order);
	}
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					0);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	init_timer(&rt_flush_timer);
	rt_flush_timer.function = rt_run_flush;
	init_timer(&rt_secret_timer);
	rt_secret_timer.function = rt_secret_rebuild;

	/* All the timers, started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
		ip_rt_secret_interval;
	add_timer(&rt_secret_timer);

#ifdef CONFIG_PROC_FS
	{
	struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
	if (!proc_net_fops_create(&init_net, "rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
					     init_net.proc_net_stat))) {
		return -ENOMEM;
	}
	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	create_proc_read_entry("rt_acct", 0, init_net.proc_net, ip_rt_acct_read, NULL);
#endif
#endif
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

	return rc;
}
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);