/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */
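
/* The flow cache maps a flow key (struct flowi) to an opaque object
 * produced by a resolver callback (in practice the IPsec/xfrm policy
 * lookup).  Each CPU owns a private hash table, so lookups run without
 * cross-CPU locking; a global generation counter, flow_cache_genid,
 * lazily invalidates stale entries on their next lookup.
 */
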
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>
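
/* One cached flow: a singly linked hash-bucket node holding the full
 * lookup key, the resolved object, and the generation id the object
 * was resolved under.  object_ref points at the object's refcount so
 * the cache can take and drop references without knowing the type.
 */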
struct flow_cache_entry {
        struct flow_cache_entry *next;
        u16                     family;
        u8                      dir;
        u32                     genid;
        struct flowi            key;
        void                    *object;
        atomic_t                *object_ref;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size  (1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;
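
/* Low and high water marks for the per-CPU entry count: crossing
 * flow_hwm triggers a shrink back toward flow_lwm entries. */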
static int flow_lwm, flow_hwm;

struct flow_percpu_info {
        int hash_rnd_recalc;
        u32 hash_rnd;
        int count;
};
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
        (per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD    (10 * 60 * HZ)

struct flow_flush_info {
        atomic_t cpuleft;
        struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
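
/* Timer callback: every FLOW_HASH_RND_PERIOD, mark each CPU's hash
 * seed stale.  The seed itself is regenerated lazily by the next
 * lookup on that CPU, which also empties that CPU's table. */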
static void flow_cache_new_hashrnd(unsigned long arg)
{
        int i;

        for_each_possible_cpu(i)
                flow_hash_rnd_recalc(i) = 1;

        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
}
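
/* Free one entry: drop the cache's reference on its object, if any,
 * return the entry to the slab, and decrement this CPU's count. */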
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
        if (fle->object)
                atomic_dec(fle->object_ref);
        kmem_cache_free(flow_cachep, fle);
        flow_count(cpu)--;
}
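
/* Trim every bucket of @cpu's table down to at most @shrink_to
 * entries: walk past the first @shrink_to nodes, then kill the rest
 * of the chain.  shrink_to == 0 therefore empties the table. */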
static void __flow_cache_shrink(int cpu, int shrink_to)
{
        struct flow_cache_entry *fle, **flp;
        int i;

        for (i = 0; i < flow_hash_size; i++) {
                int k = 0;

                flp = &flow_table(cpu)[i];
                while ((fle = *flp) != NULL && k < shrink_to) {
                        k++;
                        flp = &fle->next;
                }
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
                        flow_entry_kill(cpu, fle);
                }
        }
}
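
/* Per-bucket quota when shrinking under pressure: aim for the low
 * water mark spread evenly across the buckets. */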
static void flow_cache_shrink(int cpu)
{
        int shrink_to = flow_lwm / flow_hash_size;

        __flow_cache_shrink(cpu, shrink_to);
}
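
/* Install a fresh jhash seed for @cpu.  All existing entries were
 * hashed with the old seed, so they would land in the wrong buckets
 * now; rather than rehash them, drop them all. */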
static void flow_new_hash_rnd(int cpu)
{
        get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
        flow_hash_rnd_recalc(cpu) = 0;

        __flow_cache_shrink(cpu, 0);
}
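
/* Hash the flow key as an array of u32 words with this CPU's seed,
 * masked down to a bucket index. */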
static u32 flow_hash_code(struct flowi *key, int cpu)
{
        u32 *k = (u32 *) key;

        return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
                (flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}
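
/* Look up @key in the current CPU's table.  On a hit whose genid
 * matches the global flow_cache_genid, return the cached object with
 * its refcount raised.  Otherwise fall through to the resolver: a
 * miss first installs a new (unresolved) entry, a stale hit reuses
 * the existing one, and in both cases the resolver's result is cached
 * under the current genid.  Runs with BHs disabled so softirqs cannot
 * touch the per-CPU table midway. */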
void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
                        flow_resolve_t resolver)
{
        struct flow_cache_entry *fle, **head;
        unsigned int hash;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();

        fle = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!flow_table(cpu))
                goto nocache;

        if (flow_hash_rnd_recalc(cpu))
                flow_new_hash_rnd(cpu);
        hash = flow_hash_code(key, cpu);

        head = &flow_table(cpu)[hash];
        for (fle = *head; fle; fle = fle->next) {
                if (fle->family == family &&
                    fle->dir == dir &&
                    flow_key_compare(key, &fle->key) == 0) {
                        if (fle->genid == atomic_read(&flow_cache_genid)) {
                                void *ret = fle->object;

                                if (ret)
                                        atomic_inc(fle->object_ref);
                                local_bh_enable();
                                return ret;
                        }
                        break;
                }
        }

        if (!fle) {
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        flow_count(cpu)++;
                }
        }

nocache:
        {
                int err;
                void *obj;
                atomic_t *obj_ref;

                err = resolver(net, key, family, dir, &obj, &obj_ref);
                if (fle && !err) {
                        fle->genid = atomic_read(&flow_cache_genid);
                        if (fle->object)
                                atomic_dec(fle->object_ref);
                        fle->object = obj;
                        fle->object_ref = obj_ref;
                        if (obj)
                                atomic_inc(fle->object_ref);
                }
                local_bh_enable();

                if (err)
                        obj = ERR_PTR(err);
                return obj;
        }
}
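
/* Flush work for one CPU, run from its tasklet: unhook every object
 * whose genid is stale, then signal the initiator once the last CPU
 * finishes. */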
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        int i;
        int cpu;

        cpu = smp_processor_id();
        for (i = 0; i < flow_hash_size; i++) {
                struct flow_cache_entry *fle;

                fle = flow_table(cpu)[i];
                for (; fle; fle = fle->next) {
                        unsigned genid = atomic_read(&flow_cache_genid);

                        if (!fle->object || fle->genid == genid)
                                continue;

                        fle->object = NULL;
                        atomic_dec(fle->object_ref);
                }
        }

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}
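
/* Runs on each remote CPU via smp_call_function(); it only kicks
 * that CPU's flush tasklet, keeping the IPI handler short.  The
 * extra prototype carries __attribute__((__unused__)), apparently so
 * UP builds, where smp_call_function() compiles away, do not warn
 * about a defined-but-unused function. */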
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();

        tasklet = flow_flush_tasklet(cpu);
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}
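
/* Flush stale objects on every online CPU and wait until all of them
 * have finished.  The current CPU runs the tasklet body directly;
 * the others are reached through smp_call_function(). */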
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}
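
/* Boot-time setup for one CPU: allocate its hash table (rounded up
 * to whole pages), force a seed computation on the first lookup, and
 * prime its flush tasklet. */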
static void __init flow_cache_cpu_prepare(int cpu)
{
        struct tasklet_struct *tasklet;
        unsigned long order;

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct flow_cache_entry *)*flow_hash_size);
             order++)
                /* NOTHING */;

        flow_table(cpu) = (struct flow_cache_entry **)
                __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);

        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;

        tasklet = flow_flush_tasklet(cpu);
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}
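
/* CPU hotplug callback: when a CPU dies, empty its table so the
 * references held on cached objects are released. */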
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
}
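
/* Module init: create the entry slab, size the table at 2^10 buckets
 * with water marks at 2x and 4x that, arm the reseed timer, set up
 * every possible CPU, and register for hotplug events. */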
static int __init flow_cache_init(void)
{
        int i;

        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC,
                                        NULL);
        flow_hash_shift = 10;
        flow_lwm = 2 * flow_hash_size;
        flow_hwm = 4 * flow_hash_size;

        setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);

        for_each_possible_cpu(i)
                flow_cache_cpu_prepare(i);

        hotcpu_notifier(flow_cache_cpu, 0);
        return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);