/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_cpu_mask(cpu, *cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
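
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * caller taking an approximate snapshot of the event counters. Because
 * the per-cpu counters keep ticking, the sum is only a snapshot:
 *
 *      unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *      all_vm_events(events);
 *      printk(KERN_DEBUG "pgfault so far: %lu\n", events[PGFAULT]);
 */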

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */

        mem = zone->present_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}
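
/*
 * Worked example (illustrative, assumes 4KB pages): for a 2 GB zone on
 * a 2-cpu machine, mem = 2048 MB / 128 MB = 16, so
 * fls(num_online_cpus()) = fls(2) = 2 and fls(mem) = fls(16) = 5,
 * giving threshold = 2 * 2 * (1 + 5) = 24, which matches the
 * "24 / 2 processors / 2-4 GB" row in the sample table above.
 */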

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_zone(zone) {

                if (!zone->present_pages)
                        continue;

                threshold = calculate_threshold(zone);

                for_each_online_cpu(cpu)
                        zone_pcp(zone, cpu)->stat_threshold = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;
        long x;

        x = delta + *p;

        if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
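
/*
 * Example (illustrative sketch): with stat_threshold == 24, repeated
 * calls such as
 *
 *      __mod_zone_page_state(zone, NR_FILE_PAGES, 1);
 *
 * only touch the cheap per-cpu differential until it exceeds the
 * threshold; at that point the accumulated delta is folded into the
 * zone and global counters in one shot and the differential resets
 * to 0.
 */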

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)++;

        if (unlikely(*p > pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p + overstep, zone, item);
                *p = -overstep;
        }
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
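
/*
 * Worked example of the overstep logic (illustrative): with
 * stat_threshold == 32, the 33rd consecutive increment folds
 * 33 + 16 = 49 pages into the zone counter and leaves the differential
 * at -16, giving headroom for 48 more increments before the next fold.
 * Biasing the reset in the direction of the update roughly halves the
 * fold frequency for monotonic workloads.
 */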

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)--;

        if (unlikely(*p < -pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p - overstep, zone, item);
                *p = overstep;
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_zone(zone) {
                struct per_cpu_pageset *p;

                if (!populated_zone(zone))
                        continue;

                p = zone_pcp(zone, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                unsigned long flags;
                                int v;

                                local_irq_save(flags);
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                        }
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor.
                 *
                 * Check if there are pages remaining in this pageset;
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || !p->pcp.count)
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp.count)
                        drain_zone_pages(zone, &p->pcp);
#endif
        }

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (global_diff[i])
                        atomic_long_add(global_diff[i], &vm_stat[i]);
}
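
/*
 * Example of the expire countdown above (illustrative): with
 * vmstat_update running about once per second, a remote pageset that
 * sees no counter activity for three consecutive passes reaches
 * p->expire == 0 and its cached pages are returned via
 * drain_zone_pages(); any new counter update re-arms the countdown
 * at 3.
 */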

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
        if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
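
/*
 * Example scenario (illustrative): a task on node 0 whose allocation
 * falls back to a zone on node 1 bumps NUMA_MISS on node 1 (the zone
 * that satisfied the request), NUMA_FOREIGN on node 0 (the intended
 * zone at the head of the zonelist), and NUMA_OTHER on node 1 because
 * the allocating cpu is not local to that zone.
 */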
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}
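
/*
 * frag_show emits one line per populated zone, e.g. (values purely
 * illustrative):
 *
 *      Node 0, zone   Normal    120     45     12      3      1 ...
 *
 * i.e. the number of free blocks at each order; this is what backs
 * /proc/buddyinfo.
 */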

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                mtype = get_pageblock_migratetype(page);

                count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};

const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",
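
/*
 * Example expansion (illustrative): on a config with CONFIG_ZONE_DMA
 * and CONFIG_HIGHMEM but no CONFIG_ZONE_DMA32,
 * TEXTS_FOR_ZONES("pgalloc") becomes
 *
 *      "pgalloc_dma", "pgalloc_normal", "pgalloc_high", "pgalloc_movable",
 *
 * so the string table stays in sync with the per-zone event items.
 */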

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive",
        "nr_active",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
                   "\n        scanned  %lu (a: %lu i: %lu)"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   zone->pages_min,
                   zone->pages_low,
                   zone->pages_high,
                   zone->pages_scanned,
                   zone->nr_scan_active, zone->nr_scan_inactive,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n        protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n  pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;

                pageset = zone_pcp(zone, i);
                seq_printf(m,
                           "\n    cpu: %i"
                           "\n              count: %i"
                           "\n              high:  %i"
                           "\n              batch: %i",
                           i,
                           pageset->pcp.count,
                           pageset->pcp.high,
                           pageset->pcp.batch);
#ifdef CONFIG_SMP
                seq_printf(m, "\n  vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n  all_unreclaimable: %u"
                   "\n  prev_priority:     %i"
                   "\n  start_pfn:         %lu",
                   zone_is_all_unreclaimable(zone),
                   zone->prev_priority,
                   zone->zone_start_pfn);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}
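
/*
 * Layout of the snapshot buffer allocated above (illustrative):
 *
 *      v[0 .. NR_VM_ZONE_STAT_ITEMS-1]  zoned counters (global_page_state)
 *      v[NR_VM_ZONE_STAT_ITEMS .. ]     vm event counters (all_vm_events)
 *
 * which is exactly the order of the names in vmstat_text[], so
 * vmstat_show can index both halves with a single offset.
 */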

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}
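
/*
 * Each iteration emits one "name value" pair, e.g. (values purely
 * illustrative):
 *
 *      nr_free_pages 81120
 *      pgfault 3421001
 *
 * which is the familiar /proc/vmstat format.
 */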

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
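        /*
         * Stagger the initial expiry by the cpu number so that the
         * per-cpu timers do not all fire in the same tick.
         */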
        schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
        return 0;
}
module_init(setup_vmstat)
#endif