#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif
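
/*
 * Output layout written by show_stat() below; the numeric values in this
 * sample are purely illustrative (the per-cpu fields are cumulative times
 * in USER_HZ ticks):
 *
 *	cpu  4705 356 584 3699176 23060 17 13 0 0
 *	cpu0 1393 0 146 924847 5736 0 3 0 0
 *	intr 114930548 113199788 3 0 5 ...
 *	ctxt 1990473
 *	btime 1062191376
 *	processes 2915
 *	procs_running 1
 *	procs_blocked 0
 *
 * Per-cpu fields, in order: user nice system idle iowait irq softirq
 * steal guest.
 */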
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest;
	u64 sum = 0;
	struct timespec boottime;
	unsigned int per_irq_sum;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = cputime64_zero;
	/* boot time, in seconds since the epoch, is reported below as "btime" */
	getboottime(&boottime);
	jif = boottime.tv_sec;

	/* sum up the per-cpu counters across all possible CPUs */
	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		for_each_irq_nr(j)
			sum += kstat_irqs_cpu(j, i);

		sum += arch_irq_stat_cpu(i);
	}
	sum += arch_irq_stat();

	/* system-wide summary line; "cpu  " carries two spaces, matching the
	 * /proc/stat format, so the columns line up with the "cpuN" lines */
	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest));
	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again; the per-IRQ counters may have been updated since they
	 * were totalled above */
	for_each_irq_nr(j) {
		per_irq_sum = 0;
		for_each_possible_cpu(i)
			per_irq_sum += kstat_irqs_cpu(j, i);

		seq_printf(p, " %u", per_irq_sum);
	}

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	return 0;
}
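
/*
 * show_stat()'s output grows with the number of CPUs (one "cpuN" line each),
 * so stat_open() below preallocates the seq_file buffer: roughly one extra
 * page per 32 possible CPUs, capped at 128 KB, instead of letting seq_read()
 * start from a single page and repeatedly reallocate.
 */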
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		/* hand the preallocated buffer to the seq_file */
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);
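
/*
 * Illustrative user-space sketch (not part of this file, compiled out with
 * "#if 0"): reading back the aggregate "cpu" line that show_stat() emits.
 * Field order follows the format string above; the helper name
 * read_cpu_line() is made up for the example.
 */
#if 0
#include <stdio.h>

static int read_cpu_line(unsigned long long v[9])
{
	FILE *f = fopen("/proc/stat", "r");
	int n;

	if (!f)
		return -1;
	/* user nice system idle iowait irq softirq steal guest */
	n = fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
		   &v[8]);
	fclose(f);
	return n == 9 ? 0 : -1;
}
#endif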