2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
8 * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
14 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
17 * Alan Cox : Verify area fixes.
18 * Alan Cox : cli() protects routing changes
19 * Rui Oliveira : ICMP routing table updates
20 * (rco@di.uminho.pt) Routing table insertion and update
21 * Linus Torvalds : Rewrote bits to be sensible
22 * Alan Cox : Added BSD route gw semantics
23 * Alan Cox : Super /proc >4K
24 * Alan Cox : MTU in route table
25 * Alan Cox : MSS actually. Also added the window
27 * Sam Lantinga : Fixed route matching in rt_del()
28 * Alan Cox : Routing cache support.
29 * Alan Cox : Removed compatibility cruft.
30 * Alan Cox : RTF_REJECT support.
31 * Alan Cox : TCP irtt support.
32 * Jonathan Naylor : Added Metric support.
33 * Miquel van Smoorenburg : BSD API fixes.
34 * Miquel van Smoorenburg : Metrics.
35 * Alan Cox : Use __u32 properly
36 * Alan Cox : Aligned routing errors more closely with BSD
37 * our system is still very different.
38 * Alan Cox : Faster /proc handling
39 * Alexey Kuznetsov : Massive rework to support tree based routing,
40 * routing caches and better behaviour.
42 * Olaf Erb : irtt wasn't being copied right.
43 * Bjorn Ekwall : Kerneld route support.
44 * Alan Cox : Multicast fixed (I hope)
45 * Pavel Krauz : Limited broadcast fixed
46 * Mike McLagan : Routing by source
47 * Alexey Kuznetsov : End of old history. Split to fib.c and
48 * route.c and rewritten from scratch.
49 * Andi Kleen : Load-limit warning messages.
50 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
51 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
52 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
53 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
54 * Marc Boucher : routing by fwmark
55 * Robert Olsson : Added rt_cache statistics
56 * Arnaldo C. Melo : Convert proc stuff to seq_file
57 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
58 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
59 * Ilia Sotnikov : Removed TOS from hash calculations
61 * This program is free software; you can redistribute it and/or
62 * modify it under the terms of the GNU General Public License
63 * as published by the Free Software Foundation; either version
64 * 2 of the License, or (at your option) any later version.
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 #include <linux/bitops.h>
71 #include <linux/types.h>
72 #include <linux/kernel.h>
74 #include <linux/bootmem.h>
75 #include <linux/string.h>
76 #include <linux/socket.h>
77 #include <linux/sockios.h>
78 #include <linux/errno.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/proc_fs.h>
83 #include <linux/init.h>
84 #include <linux/workqueue.h>
85 #include <linux/skbuff.h>
86 #include <linux/inetdevice.h>
87 #include <linux/igmp.h>
88 #include <linux/pkt_sched.h>
89 #include <linux/mroute.h>
90 #include <linux/netfilter_ipv4.h>
91 #include <linux/random.h>
92 #include <linux/jhash.h>
93 #include <linux/rcupdate.h>
94 #include <linux/times.h>
96 #include <net/net_namespace.h>
97 #include <net/protocol.h>
99 #include <net/route.h>
100 #include <net/inetpeer.h>
101 #include <net/sock.h>
102 #include <net/ip_fib.h>
105 #include <net/icmp.h>
106 #include <net/xfrm.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
110 #include <linux/sysctl.h>
113 #define RT_FL_TOS(oldflp) \
114 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
116 #define IP_MAX_MTU 0xFFF0
118 #define RT_GC_TIMEOUT (300*HZ)
120 static int ip_rt_max_size;
121 static int ip_rt_gc_timeout = RT_GC_TIMEOUT;
122 static int ip_rt_gc_interval = 60 * HZ;
123 static int ip_rt_gc_min_interval = HZ / 2;
124 static int ip_rt_redirect_number = 9;
125 static int ip_rt_redirect_load = HZ / 50;
126 static int ip_rt_redirect_silence = ((HZ / 50) << (9 + 1));
127 static int ip_rt_error_cost = HZ;
128 static int ip_rt_error_burst = 5 * HZ;
129 static int ip_rt_gc_elasticity = 8;
130 static int ip_rt_mtu_expires = 10 * 60 * HZ;
131 static int ip_rt_min_pmtu = 512 + 20 + 20;
132 static int ip_rt_min_advmss = 256;
133 static int ip_rt_secret_interval = 10 * 60 * HZ;
135 #define RTprint(a...) printk(KERN_DEBUG a)
137 static void rt_worker_func(struct work_struct *work);
138 static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
139 static struct timer_list rt_secret_timer;
142 * Interface to generic destination cache.
145 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
146 static void ipv4_dst_destroy(struct dst_entry *dst);
147 static void ipv4_dst_ifdown(struct dst_entry *dst,
148 struct net_device *dev, int how);
149 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
150 static void ipv4_link_failure(struct sk_buff *skb);
151 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
152 static int rt_garbage_collect(struct dst_ops *ops);
155 static struct dst_ops ipv4_dst_ops = {
157 .protocol = __constant_htons(ETH_P_IP),
158 .gc = rt_garbage_collect,
159 .check = ipv4_dst_check,
160 .destroy = ipv4_dst_destroy,
161 .ifdown = ipv4_dst_ifdown,
162 .negative_advice = ipv4_negative_advice,
163 .link_failure = ipv4_link_failure,
164 .update_pmtu = ip_rt_update_pmtu,
165 .local_out = ip_local_out,
166 .entry_size = sizeof(struct rtable),
167 .entries = ATOMIC_INIT(0),
170 #define ECN_OR_COST(class) TC_PRIO_##class
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,       ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,       ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,             ECN_OR_COST(BULK),
	TC_PRIO_BULK,             ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,      ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,      ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK)
};
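/*
 * Illustrative sketch, not part of the original file: the table above is
 * indexed by the four TOS bits of the IPv4 header, i.e. IPTOS_TOS(tos) >> 1,
 * which is what the rt_tos2priority() helper in include/net/route.h does.
 * The stand-in name below is hypothetical and uses a plain mask so the
 * mapping can be read on its own.
 */
static inline unsigned char example_tos2prio(unsigned char tos)
{
	return ip_tos2prio[(tos & 0x1E) >> 1];	/* 0x1E == IPTOS_TOS_MASK */
}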
/* The locking scheme is rather straightforward:
198 * 1) Read-Copy Update protects the buckets of the central route hash.
199 * 2) Only writers remove entries, and they hold the lock
200 * as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */
206 struct rt_hash_bucket {
207 struct rtable *chain;
209 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
210 defined(CONFIG_PROVE_LOCKING)
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
 * (with lockdep we have quite a big spinlock_t, so keep the size down there)
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif
static spinlock_t *rt_hash_locks;
233 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
235 static __init void rt_hash_lock_init(void)
239 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
242 panic("IP: failed to allocate rt_hash_locks\n");
244 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
245 spin_lock_init(&rt_hash_locks[i]);
248 # define rt_hash_lock_addr(slot) NULL
250 static inline void rt_hash_lock_init(void)
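/*
 * Illustrative sketch (an assumption, not from the original source): with a
 * power-of-two lock table, a bucket index is mapped to a lock slot by simple
 * masking, exactly as rt_hash_lock_addr() does above, so several hash
 * buckets intentionally share one spinlock.
 */
static inline unsigned int example_lock_slot(unsigned int bucket,
					     unsigned int lock_tab_sz)
{
	return bucket & (lock_tab_sz - 1);	/* lock_tab_sz is a power of two */
}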
255 static struct rt_hash_bucket *rt_hash_table;
256 static unsigned rt_hash_mask;
257 static unsigned int rt_hash_log;
258 static atomic_t rt_genid;
260 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
261 #define RT_CACHE_STAT_INC(field) \
262 (__raw_get_cpu_var(rt_cache_stat).field++)
264 static unsigned int rt_hash_code(u32 daddr, u32 saddr)
	return jhash_2words(daddr, saddr, atomic_read(&rt_genid))
		& rt_hash_mask;
270 #define rt_hash(daddr, saddr, idx) \
271 rt_hash_code((__force u32)(__be32)(daddr),\
272 (__force u32)(__be32)(saddr) ^ ((idx) << 5))
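/*
 * Illustrative expansion of the rt_hash() macro above (hypothetical helper
 * name): the key mixes daddr, saddr xor'ed with the interface index, and the
 * current rt_genid, then masks down to the table size in rt_hash_code().
 * Folding rt_genid into the hash is what lets rt_cache_invalidate() retire
 * every existing entry simply by bumping the generation counter.
 */
static inline unsigned int example_input_hash(__be32 daddr, __be32 saddr, int iif)
{
	return rt_hash_code((__force u32)daddr,
			    (__force u32)saddr ^ ((u32)iif << 5));
}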
274 #ifdef CONFIG_PROC_FS
275 struct rt_cache_iter_state {
280 static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
282 struct rtable *r = NULL;
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
286 r = rcu_dereference(rt_hash_table[st->bucket].chain);
288 if (r->rt_genid == st->genid)
290 r = rcu_dereference(r->u.dst.rt_next);
292 rcu_read_unlock_bh();
297 static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r)
299 r = r->u.dst.rt_next;
301 rcu_read_unlock_bh();
302 if (--st->bucket < 0)
305 r = rt_hash_table[st->bucket].chain;
307 return rcu_dereference(r);
310 static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
312 struct rtable *r = rt_cache_get_first(st);
315 while (pos && (r = rt_cache_get_next(st, r))) {
316 if (r->rt_genid != st->genid)
320 return pos ? NULL : r;
323 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
325 struct rt_cache_iter_state *st = seq->private;
328 return rt_cache_get_idx(st, *pos - 1);
329 st->genid = atomic_read(&rt_genid);
330 return SEQ_START_TOKEN;
333 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
336 struct rt_cache_iter_state *st = seq->private;
338 if (v == SEQ_START_TOKEN)
339 r = rt_cache_get_first(st);
341 r = rt_cache_get_next(st, v);
346 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
348 if (v && v != SEQ_START_TOKEN)
349 rcu_read_unlock_bh();
352 static int rt_cache_seq_show(struct seq_file *seq, void *v)
354 if (v == SEQ_START_TOKEN)
355 seq_printf(seq, "%-127s\n",
356 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
357 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
360 struct rtable *r = v;
363 sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
364 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
365 r->u.dst.dev ? r->u.dst.dev->name : "*",
366 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
367 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
368 r->u.dst.__use, 0, (unsigned long)r->rt_src,
369 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
370 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
371 dst_metric(&r->u.dst, RTAX_WINDOW),
372 (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
373 dst_metric(&r->u.dst, RTAX_RTTVAR)),
375 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
376 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
379 seq_printf(seq, "%-127s\n", temp);
384 static const struct seq_operations rt_cache_seq_ops = {
385 .start = rt_cache_seq_start,
386 .next = rt_cache_seq_next,
387 .stop = rt_cache_seq_stop,
388 .show = rt_cache_seq_show,
391 static int rt_cache_seq_open(struct inode *inode, struct file *file)
393 return seq_open_private(file, &rt_cache_seq_ops,
394 sizeof(struct rt_cache_iter_state));
397 static const struct file_operations rt_cache_seq_fops = {
398 .owner = THIS_MODULE,
399 .open = rt_cache_seq_open,
402 .release = seq_release_private,
406 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
411 return SEQ_START_TOKEN;
413 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
414 if (!cpu_possible(cpu))
417 return &per_cpu(rt_cache_stat, cpu);
422 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
426 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
427 if (!cpu_possible(cpu))
430 return &per_cpu(rt_cache_stat, cpu);
436 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
441 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
443 struct rt_cache_stat *st = v;
445 if (v == SEQ_START_TOKEN) {
446 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
450 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
451 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
452 atomic_read(&ipv4_dst_ops.entries),
475 static const struct seq_operations rt_cpu_seq_ops = {
476 .start = rt_cpu_seq_start,
477 .next = rt_cpu_seq_next,
478 .stop = rt_cpu_seq_stop,
479 .show = rt_cpu_seq_show,
483 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
485 return seq_open(file, &rt_cpu_seq_ops);
488 static const struct file_operations rt_cpu_seq_fops = {
489 .owner = THIS_MODULE,
490 .open = rt_cpu_seq_open,
493 .release = seq_release,
496 #ifdef CONFIG_NET_CLS_ROUTE
497 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
498 int length, int *eof, void *data)
502 if ((offset & 3) || (length & 3))
505 if (offset >= sizeof(struct ip_rt_acct) * 256) {
510 if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
511 length = sizeof(struct ip_rt_acct) * 256 - offset;
515 offset /= sizeof(u32);
518 u32 *dst = (u32 *) buffer;
521 memset(dst, 0, length);
523 for_each_possible_cpu(i) {
527 src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
528 for (j = 0; j < length/4; j++)
536 static __init int ip_rt_proc_init(struct net *net)
538 struct proc_dir_entry *pde;
540 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
545 pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
549 pde->proc_fops = &rt_cpu_seq_fops;
551 #ifdef CONFIG_NET_CLS_ROUTE
552 pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
553 ip_rt_acct_read, NULL);
559 #ifdef CONFIG_NET_CLS_ROUTE
561 remove_proc_entry("rt_cache", net->proc_net_stat);
564 remove_proc_entry("rt_cache", net->proc_net);
569 static inline int ip_rt_proc_init(struct net *net)
573 #endif /* CONFIG_PROC_FS */
575 static __inline__ void rt_free(struct rtable *rt)
577 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
580 static __inline__ void rt_drop(struct rtable *rt)
583 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
586 static __inline__ int rt_fast_clean(struct rtable *rth)
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
590 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
591 rth->fl.iif && rth->u.dst.rt_next;
594 static __inline__ int rt_valuable(struct rtable *rth)
596 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
600 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
605 if (atomic_read(&rth->u.dst.__refcnt))
609 if (rth->u.dst.expires &&
610 time_after_eq(jiffies, rth->u.dst.expires))
613 age = jiffies - rth->u.dst.lastuse;
615 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
616 (age <= tmo2 && rt_valuable(rth)))
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
627 static inline u32 rt_score(struct rtable *rt)
629 u32 score = jiffies - rt->u.dst.lastuse;
631 score = ~score & ~(3<<30);
637 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
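/*
 * Illustrative sketch of the scoring above (hypothetical helper): bit 31
 * marks a route rt_valuable() considers worth keeping, bit 30 marks a
 * "not quite useless" one (an output route or one that is not
 * broadcast/multicast/local), and bits 29..0 hold the inverted age, so
 * rt_intern_hash() evicts the unreferenced entry with the smallest score,
 * i.e. the least valuable and longest idle one.
 */
static inline u32 example_pack_score(int very_valuable, int still_useful, u32 age)
{
	u32 score = ~age & ~(3u << 30);	/* older entry => smaller counter */

	if (very_valuable)
		score |= 1u << 31;
	if (still_useful)
		score |= 1u << 30;
	return score;
}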
643 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
645 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
646 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
647 (fl1->mark ^ fl2->mark) |
648 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
649 *(u16 *)&fl2->nl_u.ip4_u.tos) |
650 (fl1->oif ^ fl2->oif) |
651 (fl1->iif ^ fl2->iif)) == 0;
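/*
 * Illustrative sketch of the trick used in compare_keys() above: XOR-ing
 * each pair of fields and OR-ing the results gives zero only when every
 * field matches, so the whole flow comparison costs a single branch on the
 * cache-lookup fast path.
 */
static inline int example_fields_equal(u32 a1, u32 a2, u32 b1, u32 b2)
{
	return ((a1 ^ a2) | (b1 ^ b2)) == 0;
}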
654 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
656 return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net;
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
664 static void rt_do_flush(int process_context)
667 struct rtable *rth, *next;
669 for (i = 0; i <= rt_hash_mask; i++) {
670 if (process_context && need_resched())
672 rth = rt_hash_table[i].chain;
676 spin_lock_bh(rt_hash_lock_addr(i));
677 rth = rt_hash_table[i].chain;
678 rt_hash_table[i].chain = NULL;
679 spin_unlock_bh(rt_hash_lock_addr(i));
681 for (; rth; rth = next) {
682 next = rth->u.dst.rt_next;
688 static void rt_check_expire(void)
690 static unsigned int rover;
691 unsigned int i = rover, goal;
692 struct rtable *rth, **rthp;
695 mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
696 if (ip_rt_gc_timeout > 1)
697 do_div(mult, ip_rt_gc_timeout);
698 goal = (unsigned int)mult;
699 if (goal > rt_hash_mask)
700 goal = rt_hash_mask + 1;
701 for (; goal > 0; goal--) {
702 unsigned long tmo = ip_rt_gc_timeout;
704 i = (i + 1) & rt_hash_mask;
705 rthp = &rt_hash_table[i].chain;
712 spin_lock_bh(rt_hash_lock_addr(i));
713 while ((rth = *rthp) != NULL) {
714 if (rth->rt_genid != atomic_read(&rt_genid)) {
715 *rthp = rth->u.dst.rt_next;
719 if (rth->u.dst.expires) {
720 /* Entry is expired even if it is in use */
721 if (time_before_eq(jiffies, rth->u.dst.expires)) {
723 rthp = &rth->u.dst.rt_next;
726 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
728 rthp = &rth->u.dst.rt_next;
			/* Clean up aged-off entries. */
733 *rthp = rth->u.dst.rt_next;
736 spin_unlock_bh(rt_hash_lock_addr(i));
 * rt_worker_func() is run in process context.
 * We call rt_check_expire() to scan part of the hash table.
745 static void rt_worker_func(struct work_struct *work)
748 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
757 static void rt_cache_invalidate(void)
759 unsigned char shuffle;
761 get_random_bytes(&shuffle, sizeof(shuffle));
762 atomic_add(shuffle + 1U, &rt_genid);
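/*
 * Illustrative sketch (hypothetical helper): each invalidation advances the
 * generation by a random step in [1, 256], so roughly 2^24 invalidations fit
 * into the 32-bit space before a recently used rt_genid could come around
 * again; any cached entry whose rt_genid no longer matches is simply skipped
 * and garbage collected.
 */
static inline unsigned int example_bump_genid(unsigned int genid, unsigned char rnd)
{
	return genid + (unsigned int)rnd + 1;	/* step in [1, 256] */
}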
766 * delay < 0 : invalidate cache (fast : entries will be deleted later)
767 * delay >= 0 : invalidate & flush cache (can be long)
769 void rt_cache_flush(int delay)
771 rt_cache_invalidate();
773 rt_do_flush(!in_softirq());
777 * We change rt_genid and let gc do the cleanup
779 static void rt_secret_rebuild(unsigned long dummy)
781 rt_cache_invalidate();
782 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
   Short description of GC goals.

   We want to build an algorithm which keeps the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when networking
   is idle, expire is large enough to keep enough warm entries,
   and when load increases it shrinks to limit the cache size.
798 static int rt_garbage_collect(struct dst_ops *ops)
800 static unsigned long expire = RT_GC_TIMEOUT;
801 static unsigned long last_gc;
803 static int equilibrium;
804 struct rtable *rth, **rthp;
805 unsigned long now = jiffies;
	 * Garbage collection is pretty expensive,
	 * do not run it too frequently.
813 RT_CACHE_STAT_INC(gc_total);
815 if (now - last_gc < ip_rt_gc_min_interval &&
816 atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
817 RT_CACHE_STAT_INC(gc_ignored);
	/* Calculate the number of entries we want to expire now. */
822 goal = atomic_read(&ipv4_dst_ops.entries) -
823 (ip_rt_gc_elasticity << rt_hash_log);
825 if (equilibrium < ipv4_dst_ops.gc_thresh)
826 equilibrium = ipv4_dst_ops.gc_thresh;
827 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
829 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
830 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		/* We are in a dangerous area. Try to reduce the cache
		 * really aggressively.
		 */
836 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
837 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
840 if (now - last_gc >= ip_rt_gc_min_interval)
851 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
852 unsigned long tmo = expire;
854 k = (k + 1) & rt_hash_mask;
855 rthp = &rt_hash_table[k].chain;
856 spin_lock_bh(rt_hash_lock_addr(k));
857 while ((rth = *rthp) != NULL) {
858 if (rth->rt_genid == atomic_read(&rt_genid) &&
859 !rt_may_expire(rth, tmo, expire)) {
861 rthp = &rth->u.dst.rt_next;
864 *rthp = rth->u.dst.rt_next;
868 spin_unlock_bh(rt_hash_lock_addr(k));
	/* Goal is not achieved. We stop the process if:

	   - expire has been reduced to zero; otherwise, expire is halved.
	   - the table is not full.
	   - we are called from interrupt context.
	   - the jiffies check is just a fallback/debug loop breaker.
	     We will not spin here for a long time in any case.
	 */
886 RT_CACHE_STAT_INC(gc_goal_miss);
892 #if RT_CACHE_DEBUG >= 2
893 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
894 atomic_read(&ipv4_dst_ops.entries), goal, i);
897 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
899 } while (!in_softirq() && time_before_eq(jiffies, now));
901 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
904 printk(KERN_WARNING "dst cache overflow\n");
905 RT_CACHE_STAT_INC(gc_dst_overflow);
909 expire += ip_rt_gc_min_interval;
910 if (expire > ip_rt_gc_timeout ||
911 atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
912 expire = ip_rt_gc_timeout;
913 #if RT_CACHE_DEBUG >= 2
914 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
915 atomic_read(&ipv4_dst_ops.entries), goal, rover);
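/*
 * Illustrative sketch of the normal-case target above (hypothetical helper):
 * the collector tries to shrink the cache to about ip_rt_gc_elasticity
 * entries per hash bucket, i.e. it aims to free
 * entries - (elasticity << rt_hash_log) routes per run, switching to harsher
 * targets as the cache approaches ip_rt_max_size.
 */
static inline int example_gc_goal(int entries, int elasticity, unsigned int hash_log)
{
	return entries - (elasticity << hash_log);
}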
920 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
922 struct rtable *rth, **rthp;
924 struct rtable *cand, **candp;
927 int attempts = !in_softirq();
936 rthp = &rt_hash_table[hash].chain;
938 spin_lock_bh(rt_hash_lock_addr(hash));
939 while ((rth = *rthp) != NULL) {
940 if (rth->rt_genid != atomic_read(&rt_genid)) {
941 *rthp = rth->u.dst.rt_next;
945 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
947 *rthp = rth->u.dst.rt_next;
949 * Since lookup is lockfree, the deletion
950 * must be visible to another weakly ordered CPU before
951 * the insertion at the start of the hash chain.
953 rcu_assign_pointer(rth->u.dst.rt_next,
954 rt_hash_table[hash].chain);
956 * Since lookup is lockfree, the update writes
957 * must be ordered for consistency on SMP.
959 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
961 dst_use(&rth->u.dst, now);
962 spin_unlock_bh(rt_hash_lock_addr(hash));
969 if (!atomic_read(&rth->u.dst.__refcnt)) {
970 u32 score = rt_score(rth);
972 if (score <= min_score) {
981 rthp = &rth->u.dst.rt_next;
	/* ip_rt_gc_elasticity used to be the average chain length;
	 * when it is exceeded, gc becomes really aggressive.
	 *
	 * The second limit is less certain. At the moment it allows
	 * only 2 entries per bucket. We will see.
	 */
991 if (chain_length > ip_rt_gc_elasticity) {
992 *candp = cand->u.dst.rt_next;
	/* Try to bind the route to an ARP neighbour only if it is an
	   output route or on the unicast forwarding path.
	 */
1000 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1001 int err = arp_bind_neighbour(&rt->u.dst);
1003 spin_unlock_bh(rt_hash_lock_addr(hash));
1005 if (err != -ENOBUFS) {
			/* The neighbour tables are full and nothing
			   can be released. Try to shrink the route cache;
			   it most likely holds some neighbour records.
			 */
1014 if (attempts-- > 0) {
1015 int saved_elasticity = ip_rt_gc_elasticity;
1016 int saved_int = ip_rt_gc_min_interval;
1017 ip_rt_gc_elasticity = 1;
1018 ip_rt_gc_min_interval = 0;
1019 rt_garbage_collect(&ipv4_dst_ops);
1020 ip_rt_gc_min_interval = saved_int;
1021 ip_rt_gc_elasticity = saved_elasticity;
1025 if (net_ratelimit())
1026 printk(KERN_WARNING "Neighbour table overflow.\n");
1032 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1033 #if RT_CACHE_DEBUG >= 2
1034 if (rt->u.dst.rt_next) {
1036 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
1037 NIPQUAD(rt->rt_dst));
1038 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1039 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
1043 rt_hash_table[hash].chain = rt;
1044 spin_unlock_bh(rt_hash_lock_addr(hash));
1049 void rt_bind_peer(struct rtable *rt, int create)
1051 static DEFINE_SPINLOCK(rt_peer_lock);
1052 struct inet_peer *peer;
1054 peer = inet_getpeer(rt->rt_dst, create);
1056 spin_lock_bh(&rt_peer_lock);
1057 if (rt->peer == NULL) {
1061 spin_unlock_bh(&rt_peer_lock);
 * Peer allocation may fail only in serious out-of-memory conditions. However,
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance of
 * selecting an ID that is unique within a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
1073 static void ip_select_fb_ident(struct iphdr *iph)
1075 static DEFINE_SPINLOCK(ip_fb_id_lock);
1076 static u32 ip_fallback_id;
1079 spin_lock_bh(&ip_fb_id_lock);
1080 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1081 iph->id = htons(salt & 0xFFFF);
1082 ip_fallback_id = salt;
1083 spin_unlock_bh(&ip_fb_id_lock);
1086 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1088 struct rtable *rt = (struct rtable *) dst;
1091 if (rt->peer == NULL)
1092 rt_bind_peer(rt, 1);
		/* If a peer is attached to the destination, it is never detached,
		   so we need not grab a lock to dereference it.
		 */
1098 iph->id = htons(inet_getid(rt->peer, more));
1102 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1103 __builtin_return_address(0));
1105 ip_select_fb_ident(iph);
1108 static void rt_del(unsigned hash, struct rtable *rt)
1110 struct rtable **rthp, *aux;
1112 rthp = &rt_hash_table[hash].chain;
1113 spin_lock_bh(rt_hash_lock_addr(hash));
1115 while ((aux = *rthp) != NULL) {
1116 if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) {
1117 *rthp = aux->u.dst.rt_next;
1121 rthp = &aux->u.dst.rt_next;
1123 spin_unlock_bh(rt_hash_lock_addr(hash));
1126 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1127 __be32 saddr, struct net_device *dev)
1130 struct in_device *in_dev = in_dev_get(dev);
1131 struct rtable *rth, **rthp;
1132 __be32 skeys[2] = { saddr, 0 };
1133 int ikeys[2] = { dev->ifindex, 0 };
1134 struct netevent_redirect netevent;
1139 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1140 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
1141 || ipv4_is_zeronet(new_gw))
1142 goto reject_redirect;
1144 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1145 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1146 goto reject_redirect;
1147 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1148 goto reject_redirect;
1150 if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST)
1151 goto reject_redirect;
1154 for (i = 0; i < 2; i++) {
1155 for (k = 0; k < 2; k++) {
1156 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
1158 rthp=&rt_hash_table[hash].chain;
1161 while ((rth = rcu_dereference(*rthp)) != NULL) {
1164 if (rth->fl.fl4_dst != daddr ||
1165 rth->fl.fl4_src != skeys[i] ||
1166 rth->fl.oif != ikeys[k] ||
1168 rth->rt_genid != atomic_read(&rt_genid)) {
1169 rthp = &rth->u.dst.rt_next;
1173 if (rth->rt_dst != daddr ||
1174 rth->rt_src != saddr ||
1176 rth->rt_gateway != old_gw ||
1177 rth->u.dst.dev != dev)
1180 dst_hold(&rth->u.dst);
1183 rt = dst_alloc(&ipv4_dst_ops);
1190 /* Copy all the information. */
1192 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1193 rt->u.dst.__use = 1;
1194 atomic_set(&rt->u.dst.__refcnt, 1);
1195 rt->u.dst.child = NULL;
1197 dev_hold(rt->u.dst.dev);
1199 in_dev_hold(rt->idev);
1200 rt->u.dst.obsolete = 0;
1201 rt->u.dst.lastuse = jiffies;
1202 rt->u.dst.path = &rt->u.dst;
1203 rt->u.dst.neighbour = NULL;
1204 rt->u.dst.hh = NULL;
1205 rt->u.dst.xfrm = NULL;
1206 rt->rt_genid = atomic_read(&rt_genid);
1207 rt->rt_flags |= RTCF_REDIRECTED;
1209 /* Gateway is different ... */
1210 rt->rt_gateway = new_gw;
1212 /* Redirect received -> path was valid */
1213 dst_confirm(&rth->u.dst);
1216 atomic_inc(&rt->peer->refcnt);
1218 if (arp_bind_neighbour(&rt->u.dst) ||
1219 !(rt->u.dst.neighbour->nud_state &
1221 if (rt->u.dst.neighbour)
1222 neigh_event_send(rt->u.dst.neighbour, NULL);
1228 netevent.old = &rth->u.dst;
1229 netevent.new = &rt->u.dst;
1230 call_netevent_notifiers(NETEVENT_REDIRECT,
1234 if (!rt_intern_hash(hash, rt, &rt))
1247 #ifdef CONFIG_IP_ROUTE_VERBOSE
1248 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1249 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
1250 "%u.%u.%u.%u ignored.\n"
1251 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
1252 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1253 NIPQUAD(saddr), NIPQUAD(daddr));
1258 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1260 struct rtable *rt = (struct rtable*)dst;
1261 struct dst_entry *ret = dst;
1264 if (dst->obsolete) {
1267 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1268 rt->u.dst.expires) {
1269 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1271 #if RT_CACHE_DEBUG >= 1
1272 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1273 "%u.%u.%u.%u/%02x dropped\n",
1274 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    has forgotten the redirected route and we start sending redirects again
 *    (a rough model of this backoff schedule is sketched after the function below).
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1299 void ip_rt_send_redirect(struct sk_buff *skb)
1301 struct rtable *rt = (struct rtable*)skb->dst;
1302 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1307 if (!IN_DEV_TX_REDIRECTS(in_dev))
1310 /* No redirected packets during ip_rt_redirect_silence;
1311 * reset the algorithm.
1313 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1314 rt->u.dst.rate_tokens = 0;
	/* Too many ignored redirects; do not send anything.
	 * Set u.dst.rate_last to the last seen redirected packet.
	 */
1319 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1320 rt->u.dst.rate_last = jiffies;
	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
1327 if (rt->u.dst.rate_tokens == 0 ||
1329 (rt->u.dst.rate_last +
1330 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1331 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1332 rt->u.dst.rate_last = jiffies;
1333 ++rt->u.dst.rate_tokens;
1334 #ifdef CONFIG_IP_ROUTE_VERBOSE
1335 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1336 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1338 printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
1339 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
1340 NIPQUAD(rt->rt_src), rt->rt_iif,
1341 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
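/*
 * Illustrative sketch (hypothetical helper): with the defaults above
 * (ip_rt_redirect_load = HZ/50, ip_rt_redirect_number = 9), the wait before
 * the (n+1)-th redirect is load << n jiffies, doubling each time, and
 * ip_rt_redirect_silence = (HZ/50) << 10 sits just past the last gap, after
 * which the token count is reset and redirects may be sent again.
 */
static inline unsigned long example_redirect_gap(unsigned long load,
						 unsigned int sent_tokens)
{
	return load << sent_tokens;	/* jiffies until the next redirect */
}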
1348 static int ip_error(struct sk_buff *skb)
1350 struct rtable *rt = (struct rtable*)skb->dst;
1354 switch (rt->u.dst.error) {
1359 code = ICMP_HOST_UNREACH;
1362 code = ICMP_NET_UNREACH;
1363 IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
1366 code = ICMP_PKT_FILTERED;
1371 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1372 if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1373 rt->u.dst.rate_tokens = ip_rt_error_burst;
1374 rt->u.dst.rate_last = now;
1375 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1376 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1377 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1380 out: kfree_skb(skb);
1385 * The last two values are not from the RFC but
1386 * are needed for AMPRnet AX.25 paths.
1389 static const unsigned short mtu_plateau[] =
1390 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1392 static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
1396 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1397 if (old_mtu > mtu_plateau[i])
1398 return mtu_plateau[i];
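/*
 * Illustrative usage (hypothetical helper): an ICMP "fragmentation needed"
 * message that carries no next-hop MTU makes us guess the next plateau
 * strictly below the failing packet size, e.g. 1500 -> 1492 and 1006 -> 576.
 */
static inline unsigned short example_guess_for_1500(void)
{
	return guess_mtu(1500);	/* yields the 1492 plateau */
}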
1402 unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1403 unsigned short new_mtu)
1406 unsigned short old_mtu = ntohs(iph->tot_len);
1408 __be32 skeys[2] = { iph->saddr, 0, };
1409 __be32 daddr = iph->daddr;
1410 unsigned short est_mtu = 0;
1412 if (ipv4_config.no_pmtu_disc)
1415 for (i = 0; i < 2; i++) {
1416 unsigned hash = rt_hash(daddr, skeys[i], 0);
1419 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1420 rth = rcu_dereference(rth->u.dst.rt_next)) {
1421 if (rth->fl.fl4_dst == daddr &&
1422 rth->fl.fl4_src == skeys[i] &&
1423 rth->rt_dst == daddr &&
1424 rth->rt_src == iph->saddr &&
1426 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
1427 rth->u.dst.dev->nd_net == net &&
1428 rth->rt_genid == atomic_read(&rt_genid)) {
1429 unsigned short mtu = new_mtu;
1431 if (new_mtu < 68 || new_mtu >= old_mtu) {
1433 /* BSD 4.2 compatibility hack :-( */
1435 old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
1436 old_mtu >= 68 + (iph->ihl << 2))
1437 old_mtu -= iph->ihl << 2;
1439 mtu = guess_mtu(old_mtu);
1441 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
1442 if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
1443 dst_confirm(&rth->u.dst);
1444 if (mtu < ip_rt_min_pmtu) {
1445 mtu = ip_rt_min_pmtu;
1446 rth->u.dst.metrics[RTAX_LOCK-1] |=
1449 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1450 dst_set_expires(&rth->u.dst,
1459 return est_mtu ? : new_mtu;
1462 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1464 if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
1465 !(dst_metric_locked(dst, RTAX_MTU))) {
1466 if (mtu < ip_rt_min_pmtu) {
1467 mtu = ip_rt_min_pmtu;
1468 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1470 dst->metrics[RTAX_MTU-1] = mtu;
1471 dst_set_expires(dst, ip_rt_mtu_expires);
1472 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1476 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1481 static void ipv4_dst_destroy(struct dst_entry *dst)
1483 struct rtable *rt = (struct rtable *) dst;
1484 struct inet_peer *peer = rt->peer;
1485 struct in_device *idev = rt->idev;
1498 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1501 struct rtable *rt = (struct rtable *) dst;
1502 struct in_device *idev = rt->idev;
1503 if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) {
1504 struct in_device *loopback_idev =
1505 in_dev_get(dev->nd_net->loopback_dev);
1506 if (loopback_idev) {
1507 rt->idev = loopback_idev;
1513 static void ipv4_link_failure(struct sk_buff *skb)
1517 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1519 rt = (struct rtable *) skb->dst;
1521 dst_set_expires(&rt->u.dst, 0);
1524 static int ip_rt_bug(struct sk_buff *skb)
1526 printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
1527 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1528 skb->dev ? skb->dev->name : "?");
   We do not cache the source address of the outgoing interface,
   because it is used only by the IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned.
1542 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1545 struct fib_result res;
1547 if (rt->fl.iif == 0)
1549 else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) {
1550 src = FIB_RES_PREFSRC(res);
1553 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1555 memcpy(addr, &src, 4);
1558 #ifdef CONFIG_NET_CLS_ROUTE
1559 static void set_class_tag(struct rtable *rt, u32 tag)
1561 if (!(rt->u.dst.tclassid & 0xFFFF))
1562 rt->u.dst.tclassid |= tag & 0xFFFF;
1563 if (!(rt->u.dst.tclassid & 0xFFFF0000))
1564 rt->u.dst.tclassid |= tag & 0xFFFF0000;
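/*
 * Illustrative sketch mirroring set_class_tag() above (hypothetical helper):
 * only the halves of tclassid that are still zero are filled in, so tags
 * applied earlier (e.g. the nexthop's own tclassid) take precedence over
 * those applied later. The two 16-bit halves carry the routing realms used
 * by CONFIG_NET_CLS_ROUTE accounting.
 */
static inline u32 example_merge_tag(u32 tclassid, u32 tag)
{
	if (!(tclassid & 0xFFFF))
		tclassid |= tag & 0xFFFF;
	if (!(tclassid & 0xFFFF0000))
		tclassid |= tag & 0xFFFF0000;
	return tclassid;
}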
1568 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1570 struct fib_info *fi = res->fi;
1573 if (FIB_RES_GW(*res) &&
1574 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1575 rt->rt_gateway = FIB_RES_GW(*res);
1576 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1577 sizeof(rt->u.dst.metrics));
1578 if (fi->fib_mtu == 0) {
1579 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1580 if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
1581 rt->rt_gateway != rt->rt_dst &&
1582 rt->u.dst.dev->mtu > 576)
1583 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1585 #ifdef CONFIG_NET_CLS_ROUTE
1586 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1589 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1591 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1592 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1593 if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
1594 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1595 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
1596 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1598 if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
1599 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1601 #ifdef CONFIG_NET_CLS_ROUTE
1602 #ifdef CONFIG_IP_MULTIPLE_TABLES
1603 set_class_tag(rt, fib_rules_tclass(res));
1605 set_class_tag(rt, itag);
1607 rt->rt_type = res->type;
1610 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1611 u8 tos, struct net_device *dev, int our)
1616 struct in_device *in_dev = in_dev_get(dev);
1619 /* Primary sanity checks. */
1624 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1625 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1628 if (ipv4_is_zeronet(saddr)) {
1629 if (!ipv4_is_local_multicast(daddr))
1631 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1632 } else if (fib_validate_source(saddr, 0, tos, 0,
1633 dev, &spec_dst, &itag) < 0)
1636 rth = dst_alloc(&ipv4_dst_ops);
1640 rth->u.dst.output= ip_rt_bug;
1642 atomic_set(&rth->u.dst.__refcnt, 1);
1643 rth->u.dst.flags= DST_HOST;
1644 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1645 rth->u.dst.flags |= DST_NOPOLICY;
1646 rth->fl.fl4_dst = daddr;
1647 rth->rt_dst = daddr;
1648 rth->fl.fl4_tos = tos;
1649 rth->fl.mark = skb->mark;
1650 rth->fl.fl4_src = saddr;
1651 rth->rt_src = saddr;
1652 #ifdef CONFIG_NET_CLS_ROUTE
1653 rth->u.dst.tclassid = itag;
1656 rth->fl.iif = dev->ifindex;
1657 rth->u.dst.dev = init_net.loopback_dev;
1658 dev_hold(rth->u.dst.dev);
1659 rth->idev = in_dev_get(rth->u.dst.dev);
1661 rth->rt_gateway = daddr;
1662 rth->rt_spec_dst= spec_dst;
1663 rth->rt_genid = atomic_read(&rt_genid);
1664 rth->rt_flags = RTCF_MULTICAST;
1665 rth->rt_type = RTN_MULTICAST;
1667 rth->u.dst.input= ip_local_deliver;
1668 rth->rt_flags |= RTCF_LOCAL;
1671 #ifdef CONFIG_IP_MROUTE
1672 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1673 rth->u.dst.input = ip_mr_input;
1675 RT_CACHE_STAT_INC(in_slow_mc);
1678 hash = rt_hash(daddr, saddr, dev->ifindex);
1679 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1691 static void ip_handle_martian_source(struct net_device *dev,
1692 struct in_device *in_dev,
1693 struct sk_buff *skb,
1697 RT_CACHE_STAT_INC(in_martian_src);
1698 #ifdef CONFIG_IP_ROUTE_VERBOSE
1699 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		 * RFC 1812 recommendation: if the source is martian,
		 * the only hint we have is the MAC header.
1704 printk(KERN_WARNING "martian source %u.%u.%u.%u from "
1705 "%u.%u.%u.%u, on dev %s\n",
1706 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1707 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1709 const unsigned char *p = skb_mac_header(skb);
1710 printk(KERN_WARNING "ll header: ");
1711 for (i = 0; i < dev->hard_header_len; i++, p++) {
1713 if (i < (dev->hard_header_len - 1))
1722 static inline int __mkroute_input(struct sk_buff *skb,
1723 struct fib_result* res,
1724 struct in_device *in_dev,
1725 __be32 daddr, __be32 saddr, u32 tos,
1726 struct rtable **result)
1731 struct in_device *out_dev;
1736 /* get a working reference to the output device */
1737 out_dev = in_dev_get(FIB_RES_DEV(*res));
1738 if (out_dev == NULL) {
1739 if (net_ratelimit())
1740 printk(KERN_CRIT "Bug in ip_route_input" \
1741 "_slow(). Please, report\n");
1746 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1747 in_dev->dev, &spec_dst, &itag);
1749 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1757 flags |= RTCF_DIRECTSRC;
1759 if (out_dev == in_dev && err && !(flags & RTCF_MASQ) &&
1760 (IN_DEV_SHARED_MEDIA(out_dev) ||
1761 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1762 flags |= RTCF_DOREDIRECT;
1764 if (skb->protocol != htons(ETH_P_IP)) {
1765 /* Not IP (i.e. ARP). Do not create route, if it is
1766 * invalid for proxy arp. DNAT routes are always valid.
1768 if (out_dev == in_dev) {
1775 rth = dst_alloc(&ipv4_dst_ops);
1781 atomic_set(&rth->u.dst.__refcnt, 1);
1782 rth->u.dst.flags= DST_HOST;
1783 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1784 rth->u.dst.flags |= DST_NOPOLICY;
1785 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1786 rth->u.dst.flags |= DST_NOXFRM;
1787 rth->fl.fl4_dst = daddr;
1788 rth->rt_dst = daddr;
1789 rth->fl.fl4_tos = tos;
1790 rth->fl.mark = skb->mark;
1791 rth->fl.fl4_src = saddr;
1792 rth->rt_src = saddr;
1793 rth->rt_gateway = daddr;
1795 rth->fl.iif = in_dev->dev->ifindex;
1796 rth->u.dst.dev = (out_dev)->dev;
1797 dev_hold(rth->u.dst.dev);
1798 rth->idev = in_dev_get(rth->u.dst.dev);
1800 rth->rt_spec_dst= spec_dst;
1802 rth->u.dst.input = ip_forward;
1803 rth->u.dst.output = ip_output;
1804 rth->rt_genid = atomic_read(&rt_genid);
1806 rt_set_nexthop(rth, res, itag);
1808 rth->rt_flags = flags;
1813 /* release the working reference to the output device */
1814 in_dev_put(out_dev);
1818 static inline int ip_mkroute_input(struct sk_buff *skb,
1819 struct fib_result* res,
1820 const struct flowi *fl,
1821 struct in_device *in_dev,
1822 __be32 daddr, __be32 saddr, u32 tos)
1824 struct rtable* rth = NULL;
1828 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1829 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
1830 fib_select_multipath(fl, res);
1833 /* create a routing cache entry */
1834 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1838 /* put it into the cache */
1839 hash = rt_hash(daddr, saddr, fl->iif);
1840 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
1853 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1854 u8 tos, struct net_device *dev)
1856 struct fib_result res;
1857 struct in_device *in_dev = in_dev_get(dev);
1858 struct flowi fl = { .nl_u = { .ip4_u =
1862 .scope = RT_SCOPE_UNIVERSE,
1865 .iif = dev->ifindex };
1868 struct rtable * rth;
1873 struct net * net = dev->nd_net;
1875 /* IP on this device is disabled. */
	/* Check for the weirdest martians, which cannot be detected
	   by fib_lookup.
	 */
1884 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1885 ipv4_is_loopback(saddr))
1886 goto martian_source;
1888 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix this or not. Waiting for complaints :-)
	 */
1894 if (ipv4_is_zeronet(saddr))
1895 goto martian_source;
1897 if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
1898 ipv4_is_loopback(daddr))
1899 goto martian_destination;
 *	Now we are ready to route the packet.
1904 if ((err = fib_lookup(net, &fl, &res)) != 0) {
1905 if (!IN_DEV_FORWARD(in_dev))
1911 RT_CACHE_STAT_INC(in_slow_tot);
1913 if (res.type == RTN_BROADCAST)
1916 if (res.type == RTN_LOCAL) {
1918 result = fib_validate_source(saddr, daddr, tos,
1919 net->loopback_dev->ifindex,
1920 dev, &spec_dst, &itag);
1922 goto martian_source;
1924 flags |= RTCF_DIRECTSRC;
1929 if (!IN_DEV_FORWARD(in_dev))
1931 if (res.type != RTN_UNICAST)
1932 goto martian_destination;
1934 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1942 if (skb->protocol != htons(ETH_P_IP))
1945 if (ipv4_is_zeronet(saddr))
1946 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1948 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1951 goto martian_source;
1953 flags |= RTCF_DIRECTSRC;
1955 flags |= RTCF_BROADCAST;
1956 res.type = RTN_BROADCAST;
1957 RT_CACHE_STAT_INC(in_brd);
1960 rth = dst_alloc(&ipv4_dst_ops);
1964 rth->u.dst.output= ip_rt_bug;
1965 rth->rt_genid = atomic_read(&rt_genid);
1967 atomic_set(&rth->u.dst.__refcnt, 1);
1968 rth->u.dst.flags= DST_HOST;
1969 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1970 rth->u.dst.flags |= DST_NOPOLICY;
1971 rth->fl.fl4_dst = daddr;
1972 rth->rt_dst = daddr;
1973 rth->fl.fl4_tos = tos;
1974 rth->fl.mark = skb->mark;
1975 rth->fl.fl4_src = saddr;
1976 rth->rt_src = saddr;
1977 #ifdef CONFIG_NET_CLS_ROUTE
1978 rth->u.dst.tclassid = itag;
1981 rth->fl.iif = dev->ifindex;
1982 rth->u.dst.dev = net->loopback_dev;
1983 dev_hold(rth->u.dst.dev);
1984 rth->idev = in_dev_get(rth->u.dst.dev);
1985 rth->rt_gateway = daddr;
1986 rth->rt_spec_dst= spec_dst;
1987 rth->u.dst.input= ip_local_deliver;
1988 rth->rt_flags = flags|RTCF_LOCAL;
1989 if (res.type == RTN_UNREACHABLE) {
1990 rth->u.dst.input= ip_error;
1991 rth->u.dst.error= -err;
1992 rth->rt_flags &= ~RTCF_LOCAL;
1994 rth->rt_type = res.type;
1995 hash = rt_hash(daddr, saddr, fl.iif);
1996 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
2000 RT_CACHE_STAT_INC(in_no_route);
2001 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2002 res.type = RTN_UNREACHABLE;
2008 * Do not cache martian addresses: they should be logged (RFC1812)
2010 martian_destination:
2011 RT_CACHE_STAT_INC(in_martian_dst);
2012 #ifdef CONFIG_IP_ROUTE_VERBOSE
2013 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2014 printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
2015 "%u.%u.%u.%u, dev %s\n",
2016 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2020 err = -EHOSTUNREACH;
2032 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2036 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2037 u8 tos, struct net_device *dev)
2039 struct rtable * rth;
2041 int iif = dev->ifindex;
2044 net = skb->dev->nd_net;
2045 tos &= IPTOS_RT_MASK;
2046 hash = rt_hash(daddr, saddr, iif);
2049 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2050 rth = rcu_dereference(rth->u.dst.rt_next)) {
2051 if (rth->fl.fl4_dst == daddr &&
2052 rth->fl.fl4_src == saddr &&
2053 rth->fl.iif == iif &&
2055 rth->fl.mark == skb->mark &&
2056 rth->fl.fl4_tos == tos &&
2057 rth->u.dst.dev->nd_net == net &&
2058 rth->rt_genid == atomic_read(&rt_genid)) {
2059 dst_use(&rth->u.dst, jiffies);
2060 RT_CACHE_STAT_INC(in_hit);
2062 skb->dst = (struct dst_entry*)rth;
2065 RT_CACHE_STAT_INC(in_hlist_search);
	/* Multicast recognition logic has been moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, e.g. for
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
2080 if (ipv4_is_multicast(daddr)) {
2081 struct in_device *in_dev;
2084 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2085 int our = ip_check_mc(in_dev, daddr, saddr,
2086 ip_hdr(skb)->protocol);
2088 #ifdef CONFIG_IP_MROUTE
2089 || (!ipv4_is_local_multicast(daddr) &&
2090 IN_DEV_MFORWARD(in_dev))
2094 return ip_route_input_mc(skb, daddr, saddr,
2101 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2104 static inline int __mkroute_output(struct rtable **result,
2105 struct fib_result* res,
2106 const struct flowi *fl,
2107 const struct flowi *oldflp,
2108 struct net_device *dev_out,
2112 struct in_device *in_dev;
2113 u32 tos = RT_FL_TOS(oldflp);
2116 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2119 if (fl->fl4_dst == htonl(0xFFFFFFFF))
2120 res->type = RTN_BROADCAST;
2121 else if (ipv4_is_multicast(fl->fl4_dst))
2122 res->type = RTN_MULTICAST;
2123 else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
2126 if (dev_out->flags & IFF_LOOPBACK)
2127 flags |= RTCF_LOCAL;
	/* get a working reference to the inet device */
2130 in_dev = in_dev_get(dev_out);
2134 if (res->type == RTN_BROADCAST) {
2135 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2137 fib_info_put(res->fi);
2140 } else if (res->type == RTN_MULTICAST) {
2141 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2142 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2144 flags &= ~RTCF_LOCAL;
			/* If no multicast route exists, use the
			   default one, but do not gateway in this case.
			 */
2149 if (res->fi && res->prefixlen < 4) {
2150 fib_info_put(res->fi);
2156 rth = dst_alloc(&ipv4_dst_ops);
2162 atomic_set(&rth->u.dst.__refcnt, 1);
2163 rth->u.dst.flags= DST_HOST;
2164 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2165 rth->u.dst.flags |= DST_NOXFRM;
2166 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2167 rth->u.dst.flags |= DST_NOPOLICY;
2169 rth->fl.fl4_dst = oldflp->fl4_dst;
2170 rth->fl.fl4_tos = tos;
2171 rth->fl.fl4_src = oldflp->fl4_src;
2172 rth->fl.oif = oldflp->oif;
2173 rth->fl.mark = oldflp->mark;
2174 rth->rt_dst = fl->fl4_dst;
2175 rth->rt_src = fl->fl4_src;
2176 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
2179 rth->u.dst.dev = dev_out;
2181 rth->idev = in_dev_get(dev_out);
2182 rth->rt_gateway = fl->fl4_dst;
2183 rth->rt_spec_dst= fl->fl4_src;
2185 rth->u.dst.output=ip_output;
2186 rth->rt_genid = atomic_read(&rt_genid);
2188 RT_CACHE_STAT_INC(out_slow_tot);
2190 if (flags & RTCF_LOCAL) {
2191 rth->u.dst.input = ip_local_deliver;
2192 rth->rt_spec_dst = fl->fl4_dst;
2194 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2195 rth->rt_spec_dst = fl->fl4_src;
2196 if (flags & RTCF_LOCAL &&
2197 !(dev_out->flags & IFF_LOOPBACK)) {
2198 rth->u.dst.output = ip_mc_output;
2199 RT_CACHE_STAT_INC(out_slow_mc);
2201 #ifdef CONFIG_IP_MROUTE
2202 if (res->type == RTN_MULTICAST) {
2203 if (IN_DEV_MFORWARD(in_dev) &&
2204 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2205 rth->u.dst.input = ip_mr_input;
2206 rth->u.dst.output = ip_mc_output;
2212 rt_set_nexthop(rth, res, 0);
2214 rth->rt_flags = flags;
	/* release the working reference to the inet device */
2224 static inline int ip_mkroute_output(struct rtable **rp,
2225 struct fib_result* res,
2226 const struct flowi *fl,
2227 const struct flowi *oldflp,
2228 struct net_device *dev_out,
2231 struct rtable *rth = NULL;
2232 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2235 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2236 err = rt_intern_hash(hash, rth, rp);
2243 * Major route resolver routine.
2246 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2247 const struct flowi *oldflp)
2249 u32 tos = RT_FL_TOS(oldflp);
2250 struct flowi fl = { .nl_u = { .ip4_u =
2251 { .daddr = oldflp->fl4_dst,
2252 .saddr = oldflp->fl4_src,
2253 .tos = tos & IPTOS_RT_MASK,
2254 .scope = ((tos & RTO_ONLINK) ?
2258 .mark = oldflp->mark,
2259 .iif = net->loopback_dev->ifindex,
2260 .oif = oldflp->oif };
2261 struct fib_result res;
2263 struct net_device *dev_out = NULL;
2269 #ifdef CONFIG_IP_MULTIPLE_TABLES
2273 if (oldflp->fl4_src) {
2275 if (ipv4_is_multicast(oldflp->fl4_src) ||
2276 ipv4_is_lbcast(oldflp->fl4_src) ||
2277 ipv4_is_zeronet(oldflp->fl4_src))
2280 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2281 dev_out = ip_dev_find(net, oldflp->fl4_src);
2282 if (dev_out == NULL)
		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with a saddr
		      of another iface. --ANK
		 */
2293 if (oldflp->oif == 0
2294 && (ipv4_is_multicast(oldflp->fl4_dst) ||
2295 oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* Special hack: a user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set the ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are broken,
			   because we are not allowed to build a multicast path
			   with a loopback source address (the routing cache
			   cannot know that the ttl is zero, so the packet
			   will not leave this host and the route is valid).
			   Luckily, this hack is a good workaround.
			 */
2311 fl.oif = dev_out->ifindex;
2321 dev_out = dev_get_by_index(net, oldflp->oif);
2323 if (dev_out == NULL)
2326 /* RACE: Check return value of inet_select_addr instead. */
2327 if (__in_dev_get_rtnl(dev_out) == NULL) {
2329 goto out; /* Wrong error code */
2332 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2333 oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2335 fl.fl4_src = inet_select_addr(dev_out, 0,
2340 if (ipv4_is_multicast(oldflp->fl4_dst))
2341 fl.fl4_src = inet_select_addr(dev_out, 0,
2343 else if (!oldflp->fl4_dst)
2344 fl.fl4_src = inet_select_addr(dev_out, 0,
2350 fl.fl4_dst = fl.fl4_src;
2352 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2355 dev_out = net->loopback_dev;
2357 fl.oif = net->loopback_dev->ifindex;
2358 res.type = RTN_LOCAL;
2359 flags |= RTCF_LOCAL;
2363 if (fib_lookup(net, &fl, &res)) {
			/* Apparently, the routing tables are wrong. Assume
			   that the destination is on-link.

			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, the routing
			   tables are looked up with only one purpose:
			   to check whether the destination is gatewayed rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both the routing tables
			   and the ifaddr state. --ANK

			   We could do this even if oif is unknown,
			   as IPv6 likely does, but we do not.
			 */
2384 if (fl.fl4_src == 0)
2385 fl.fl4_src = inet_select_addr(dev_out, 0,
2387 res.type = RTN_UNICAST;
2397 if (res.type == RTN_LOCAL) {
2399 fl.fl4_src = fl.fl4_dst;
2402 dev_out = net->loopback_dev;
2404 fl.oif = dev_out->ifindex;
2406 fib_info_put(res.fi);
2408 flags |= RTCF_LOCAL;
2412 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2413 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2414 fib_select_multipath(&fl, &res);
2417 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2418 fib_select_default(net, &fl, &res);
2421 fl.fl4_src = FIB_RES_PREFSRC(res);
2425 dev_out = FIB_RES_DEV(res);
2427 fl.oif = dev_out->ifindex;
2431 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2441 int __ip_route_output_key(struct net *net, struct rtable **rp,
2442 const struct flowi *flp)
2447 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
2450 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2451 rth = rcu_dereference(rth->u.dst.rt_next)) {
2452 if (rth->fl.fl4_dst == flp->fl4_dst &&
2453 rth->fl.fl4_src == flp->fl4_src &&
2455 rth->fl.oif == flp->oif &&
2456 rth->fl.mark == flp->mark &&
2457 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2458 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2459 rth->u.dst.dev->nd_net == net &&
2460 rth->rt_genid == atomic_read(&rt_genid)) {
2461 dst_use(&rth->u.dst, jiffies);
2462 RT_CACHE_STAT_INC(out_hit);
2463 rcu_read_unlock_bh();
2467 RT_CACHE_STAT_INC(out_hlist_search);
2469 rcu_read_unlock_bh();
2471 return ip_route_output_slow(net, rp, flp);
2474 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2476 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2480 static struct dst_ops ipv4_dst_blackhole_ops = {
2482 .protocol = __constant_htons(ETH_P_IP),
2483 .destroy = ipv4_dst_destroy,
2484 .check = ipv4_dst_check,
2485 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2486 .entry_size = sizeof(struct rtable),
2487 .entries = ATOMIC_INIT(0),
2491 static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
2493 struct rtable *ort = *rp;
2494 struct rtable *rt = (struct rtable *)
2495 dst_alloc(&ipv4_dst_blackhole_ops);
2498 struct dst_entry *new = &rt->u.dst;
2500 atomic_set(&new->__refcnt, 1);
2502 new->input = dst_discard;
2503 new->output = dst_discard;
2504 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2506 new->dev = ort->u.dst.dev;
2512 rt->idev = ort->idev;
2514 in_dev_hold(rt->idev);
2515 rt->rt_genid = atomic_read(&rt_genid);
2516 rt->rt_flags = ort->rt_flags;
2517 rt->rt_type = ort->rt_type;
2518 rt->rt_dst = ort->rt_dst;
2519 rt->rt_src = ort->rt_src;
2520 rt->rt_iif = ort->rt_iif;
2521 rt->rt_gateway = ort->rt_gateway;
2522 rt->rt_spec_dst = ort->rt_spec_dst;
2523 rt->peer = ort->peer;
2525 atomic_inc(&rt->peer->refcnt);
2530 dst_release(&(*rp)->u.dst);
2532 return (rt ? 0 : -ENOMEM);
2535 int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2536 struct sock *sk, int flags)
2540 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
2545 flp->fl4_src = (*rp)->rt_src;
2547 flp->fl4_dst = (*rp)->rt_dst;
2548 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
2549 flags ? XFRM_LOOKUP_WAIT : 0);
2550 if (err == -EREMOTE)
2551 err = ipv4_dst_blackhole(rp, flp, sk);
2559 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2561 int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2563 return ip_route_output_flow(net, rp, flp, NULL, 0);
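/*
 * Illustration only: a hedged sketch of a typical caller of the lookup API
 * exported above.  The helper name example_output_route() and the UDP
 * protocol/addresses are hypothetical; real callers (TCP, UDP, ICMP, ...)
 * build their flowi keys in the same way.
 */
static int example_output_route(struct net *net, __be32 daddr, __be32 saddr,
				struct rtable **prt)
{
	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
						 .saddr = saddr,
						 .tos = 0 } },
			    .proto = IPPROTO_UDP };
	int err;

	/* Cache hit returns immediately; otherwise ip_route_output_slow()
	 * walks the FIB and inserts a fresh entry. */
	err = ip_route_output_key(net, prt, &fl);
	if (err)
		return err;

	/* ... transmit via (*prt)->u.dst ... */
	ip_rt_put(*prt);	/* drop the reference taken by the lookup */
	return 0;
}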
2566 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2567 int nowait, unsigned int flags)
	struct rtable *rt = (struct rtable *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;
2575 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2579 r = nlmsg_data(nlh);
2580 r->rtm_family = AF_INET;
2581 r->rtm_dst_len = 32;
2583 r->rtm_tos = rt->fl.fl4_tos;
2584 r->rtm_table = RT_TABLE_MAIN;
2585 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2586 r->rtm_type = rt->rt_type;
2587 r->rtm_scope = RT_SCOPE_UNIVERSE;
2588 r->rtm_protocol = RTPROT_UNSPEC;
2589 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2590 if (rt->rt_flags & RTCF_NOTIFY)
2591 r->rtm_flags |= RTM_F_NOTIFY;
2593 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2595 if (rt->fl.fl4_src) {
2596 r->rtm_src_len = 32;
2597 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2600 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
2601 #ifdef CONFIG_NET_CLS_ROUTE
2602 if (rt->u.dst.tclassid)
2603 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2606 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2607 else if (rt->rt_src != rt->fl.fl4_src)
2608 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2610 if (rt->rt_dst != rt->rt_gateway)
2611 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2613 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2614 goto nla_put_failure;
2616 error = rt->u.dst.error;
2617 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2619 id = rt->peer->ip_id_count;
2620 if (rt->peer->tcp_ts_stamp) {
2621 ts = rt->peer->tcp_ts;
2622 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2627 #ifdef CONFIG_IP_MROUTE
2628 __be32 dst = rt->rt_dst;
2630 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2631 IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
2632 int err = ipmr_get_route(skb, r, nowait);
2637 goto nla_put_failure;
2639 if (err == -EMSGSIZE)
2640 goto nla_put_failure;
2646 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2649 if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
2650 expires, error) < 0)
2651 goto nla_put_failure;
	return nlmsg_end(skb, nlh);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
2660 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
	struct net *net = in_skb->sk->sk_net;
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0, src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;
	if (net != &init_net)
		return -EINVAL;
2675 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2679 rtm = nlmsg_data(nlh);
2681 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2687 /* Reserve room for dummy headers, this skb can pass
2688 through good chunk of routing engine.
2690 skb_reset_mac_header(skb);
2691 skb_reset_network_header(skb);
2693 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2694 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2695 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2697 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2698 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2699 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2702 struct net_device *dev;
2704 dev = __dev_get_by_index(&init_net, iif);
2710 skb->protocol = htons(ETH_P_IP);
2713 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2716 rt = (struct rtable*) skb->dst;
2717 if (err == 0 && rt->u.dst.error)
2718 err = -rt->u.dst.error;
2725 .tos = rtm->rtm_tos,
2728 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2730 err = ip_route_output_key(&init_net, &rt, &fl);
2736 skb->dst = &rt->u.dst;
2737 if (rtm->rtm_flags & RTM_F_NOTIFY)
2738 rt->rt_flags |= RTCF_NOTIFY;
2740 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2741 RTM_NEWROUTE, 0, 0);
2745 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
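/*
 * Illustration only: a minimal userspace sketch of the RTM_GETROUTE request
 * that inet_rtm_getroute() above services (iproute2's "ip route get" does
 * the full exchange).  The queried address 192.0.2.1 is a placeholder and
 * the reply, an RTM_NEWROUTE message built by rt_fill_info(), is left
 * unparsed here.
 */
#include <arpa/inet.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[64];
	} req;
	struct sockaddr_nl sa;
	struct rtattr *rta;
	struct in_addr dst;
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;

	/* RTA_DST: the destination we want the kernel to resolve. */
	inet_pton(AF_INET, "192.0.2.1", &dst);
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(dst));
	memcpy(RTA_DATA(rta), &dst, sizeof(dst));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("sendto");
	/* A real client would now recv() and walk the RTM_NEWROUTE reply. */

	close(fd);
	return 0;
}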
2754 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2763 s_idx = idx = cb->args[1];
2764 for (h = s_h; h <= rt_hash_mask; h++) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2767 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (rt->rt_genid != atomic_read(&rt_genid))
				continue;
			skb->dst = dst_clone(&rt->u.dst);
2773 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2774 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2775 1, NLM_F_MULTI) <= 0) {
2776 dst_release(xchg(&skb->dst, NULL));
2777 rcu_read_unlock_bh();
2780 dst_release(xchg(&skb->dst, NULL));
2782 rcu_read_unlock_bh();
2792 void ip_rt_multicast_event(struct in_device *in_dev)
2797 #ifdef CONFIG_SYSCTL
2798 static int flush_delay;
2800 static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2801 struct file *filp, void __user *buffer,
2802 size_t *lenp, loff_t *ppos)
2805 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2806 rt_cache_flush(flush_delay);
static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
					      int __user *name, int nlen,
					      void __user *oldval, size_t __user *oldlenp,
					      void __user *newval, size_t newlen)
{
	int delay;
	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	rt_cache_flush(delay);
	return 0;
}
2830 ctl_table ipv4_route_table[] = {
2832 .ctl_name = NET_IPV4_ROUTE_FLUSH,
2833 .procname = "flush",
2834 .data = &flush_delay,
2835 .maxlen = sizeof(int),
2837 .proc_handler = &ipv4_sysctl_rtcache_flush,
2838 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
2841 .ctl_name = NET_IPV4_ROUTE_GC_THRESH,
2842 .procname = "gc_thresh",
2843 .data = &ipv4_dst_ops.gc_thresh,
2844 .maxlen = sizeof(int),
2846 .proc_handler = &proc_dointvec,
2849 .ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
2850 .procname = "max_size",
2851 .data = &ip_rt_max_size,
2852 .maxlen = sizeof(int),
2854 .proc_handler = &proc_dointvec,
2857 /* Deprecated. Use gc_min_interval_ms */
2859 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
2860 .procname = "gc_min_interval",
2861 .data = &ip_rt_gc_min_interval,
2862 .maxlen = sizeof(int),
2864 .proc_handler = &proc_dointvec_jiffies,
2865 .strategy = &sysctl_jiffies,
2868 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
2869 .procname = "gc_min_interval_ms",
2870 .data = &ip_rt_gc_min_interval,
2871 .maxlen = sizeof(int),
2873 .proc_handler = &proc_dointvec_ms_jiffies,
2874 .strategy = &sysctl_ms_jiffies,
2877 .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
2878 .procname = "gc_timeout",
2879 .data = &ip_rt_gc_timeout,
2880 .maxlen = sizeof(int),
2882 .proc_handler = &proc_dointvec_jiffies,
2883 .strategy = &sysctl_jiffies,
2886 .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
2887 .procname = "gc_interval",
2888 .data = &ip_rt_gc_interval,
2889 .maxlen = sizeof(int),
2891 .proc_handler = &proc_dointvec_jiffies,
2892 .strategy = &sysctl_jiffies,
2895 .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
2896 .procname = "redirect_load",
2897 .data = &ip_rt_redirect_load,
2898 .maxlen = sizeof(int),
2900 .proc_handler = &proc_dointvec,
2903 .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
2904 .procname = "redirect_number",
2905 .data = &ip_rt_redirect_number,
2906 .maxlen = sizeof(int),
2908 .proc_handler = &proc_dointvec,
2911 .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
2912 .procname = "redirect_silence",
2913 .data = &ip_rt_redirect_silence,
2914 .maxlen = sizeof(int),
2916 .proc_handler = &proc_dointvec,
2919 .ctl_name = NET_IPV4_ROUTE_ERROR_COST,
2920 .procname = "error_cost",
2921 .data = &ip_rt_error_cost,
2922 .maxlen = sizeof(int),
2924 .proc_handler = &proc_dointvec,
2927 .ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
2928 .procname = "error_burst",
2929 .data = &ip_rt_error_burst,
2930 .maxlen = sizeof(int),
2932 .proc_handler = &proc_dointvec,
2935 .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
2936 .procname = "gc_elasticity",
2937 .data = &ip_rt_gc_elasticity,
2938 .maxlen = sizeof(int),
2940 .proc_handler = &proc_dointvec,
2943 .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
2944 .procname = "mtu_expires",
2945 .data = &ip_rt_mtu_expires,
2946 .maxlen = sizeof(int),
2948 .proc_handler = &proc_dointvec_jiffies,
2949 .strategy = &sysctl_jiffies,
2952 .ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
2953 .procname = "min_pmtu",
2954 .data = &ip_rt_min_pmtu,
2955 .maxlen = sizeof(int),
2957 .proc_handler = &proc_dointvec,
2960 .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
2961 .procname = "min_adv_mss",
2962 .data = &ip_rt_min_advmss,
2963 .maxlen = sizeof(int),
2965 .proc_handler = &proc_dointvec,
2968 .ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
2969 .procname = "secret_interval",
2970 .data = &ip_rt_secret_interval,
2971 .maxlen = sizeof(int),
2973 .proc_handler = &proc_dointvec_jiffies,
2974 .strategy = &sysctl_jiffies,
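/*
 * Illustration only: every entry above is exposed as
 * /proc/sys/net/ipv4/route/<procname>.  Note that gc_min_interval and
 * gc_min_interval_ms point at the same variable, the former (deprecated)
 * in seconds and the latter in milliseconds.  A minimal userspace sketch
 * that triggers ipv4_sysctl_rtcache_flush() through the "flush" entry:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The written value lands in flush_delay; 0 asks for an immediate
	 * flush of the routing cache. */
	if (write(fd, "0\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}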
2980 #ifdef CONFIG_NET_CLS_ROUTE
2981 struct ip_rt_acct *ip_rt_acct __read_mostly;
2982 #endif /* CONFIG_NET_CLS_ROUTE */
2984 static __initdata unsigned long rhash_entries;
2985 static int __init set_rhash_entries(char *str)
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
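/*
 * Boot-time sizing: "rhash_entries=N" on the kernel command line (for
 * example rhash_entries=262144) overrides the default number of route
 * cache hash buckets requested from alloc_large_system_hash() in
 * ip_rt_init() below; 0 keeps the memory-based heuristic.
 */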
2994 int __init ip_rt_init(void)
2998 atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
2999 (jiffies ^ (jiffies >> 7))));
3001 #ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
3007 ipv4_dst_ops.kmem_cachep =
3008 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3009 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3011 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
						15 : 17,
					0,
					&rt_hash_log, &rt_hash_mask, 0);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3024 rt_hash_lock_init();
3026 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3027 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3032 setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);
3034 /* All the timers, started at system startup tend
3035 to synchronize. Perturb it a bit.
3037 schedule_delayed_work(&expires_work,
3038 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3040 rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
3041 ip_rt_secret_interval;
3042 add_timer(&rt_secret_timer);
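	/*
	 * The delay above is net_random() % ip_rt_secret_interval +
	 * ip_rt_secret_interval, i.e. uniformly distributed in
	 * [interval, 2*interval), so rt_secret_rebuild() does not fire in
	 * lock-step with the other timers armed at boot.
	 */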
3044 if (ip_rt_proc_init(&init_net))
3045 printk(KERN_ERR "Unable to create route proc files\n");
3050 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3055 EXPORT_SYMBOL(__ip_select_ident);
3056 EXPORT_SYMBOL(ip_route_input);
3057 EXPORT_SYMBOL(ip_route_output_key);