#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

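	/*
	 * bytes counts memory private to this process, sbytes counts
	 * memory that may also be charged to other processes, and slack
	 * is the overhead kobjsize() reports beyond what each mapping
	 * actually asked for.
	 */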
	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}
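
	/*
	 * The mm, fs, files and sighand structures are charged as shared
	 * when their reference counts show more than one user.
	 */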
	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

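	/* the totals are reported through this task's /proc/<pid>/status */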
64 "Slack:\t%8lu bytes\n"
65 "Shared:\t%8lu bytes\n",
66 bytes, slack, sbytes);
68 up_read(&mm->mmap_sem);
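/*
 * sum up the size of the allocations backing each of the task's mappings
 */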
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

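/*
 * work out the size figures reported through /proc/<pid>/statm
 */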
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
	struct vm_list_struct *vml = _vml;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return nommu_vma_show(m, vml->vma);
}

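/*
 * seq_file ->start operation: pin the task and its mm, then walk the
 * VMA list to the entry selected by *pos
 */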
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_list_struct *vml;
	struct mm_struct *mm;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	/* start from the Nth VMA */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (n-- == 0)
			return vml;
	return NULL;
}

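/*
 * seq_file ->stop operation: drop the lock and the references taken when
 * the walk was started
 */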
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}

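/*
 * seq_file ->next operation: advance the position and step to the next
 * entry in the VMA list
 */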
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
	struct vm_list_struct *vml = _vml;

	(*pos)++;
	return vml ? vml->next : NULL;
}

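/* seq_file operations used to generate /proc/<pid>/maps */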
static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

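/*
 * set up the per-open private state and attach it to the seq_file
 */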
static int maps_open(struct inode *inode, struct file *file)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_maps_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};