lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */
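/*
 * Illustrative usage sketch (added for documentation only, not code from
 * this file): a hypothetical caller keeping an approximate event count
 * might use the interface roughly as follows.  "events", "nr" and "total"
 * are made-up names for the example.
 *
 *	struct percpu_counter events;
 *	s64 total;
 *
 *	percpu_counter_init(&events, 0);
 *	percpu_counter_mod(&events, nr);	cheap, mostly per-cpu fast path
 *	total = percpu_counter_sum(&events);	slow but exact, clamped to >= 0
 *	percpu_counter_destroy(&events);
 */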

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

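/*
 * Add "amount" to the counter.  The delta is accumulated in this CPU's
 * local slot; only when the local value reaches +/- FBC_BATCH is it folded
 * into the shared fbc->count under fbc->lock, which keeps the common case
 * free of lock and cacheline contention.
 */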
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
        long count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
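
/*
 * Worked example (assuming, purely for illustration, that FBC_BATCH is 32
 * on this configuration): if this CPU's slot holds 30 and the caller passes
 * amount = 5, count becomes 35 >= FBC_BATCH, so 35 is added to fbc->count
 * under the lock and the slot is reset to 0.  A delta that keeps the slot
 * strictly within (-FBC_BATCH, FBC_BATCH) only writes the local slot.
 */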

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().  Like that
 * helper, a negative total is clamped to zero.
 */
s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);

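/*
 * Initialise the counter to "amount" and allocate the per-cpu slots.  With
 * CPU hotplug enabled, the counter is also added to the global list so that
 * counts from a dying CPU can be folded back into fbc->count.
 */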
void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        spin_lock_init(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_init);

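/*
 * Release the per-cpu storage and, with CPU hotplug enabled, unlink the
 * counter from the global list so the hotplug callback no longer touches it.
 */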
void percpu_counter_destroy(struct percpu_counter *fbc)
{
        free_percpu(fbc->counters);
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);

#ifdef CONFIG_HOTPLUG_CPU
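/*
 * CPU hotplug callback: when a CPU goes away (CPU_DEAD), fold its per-cpu
 * slot of every registered counter into the shared count so no updates are
 * lost, and zero the slot in case the CPU comes back.
 */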
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu;
        struct percpu_counter *fbc;

        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;

                spin_lock(&fbc->lock);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        }
        mutex_unlock(&percpu_counters_lock);
        return NOTIFY_OK;
}

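/*
 * Register the hotplug callback at boot so counters stay accurate across
 * CPU offlining.
 */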
static int __init percpu_counter_startup(void)
{
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
#endif