#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include "internal.h"
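
/*
 * /proc/<pid> memory reporting for kernels without an MMU: each mapping
 * is tracked in mm->context.vmlist, and kobjsize() reports the size of
 * the allocation actually backing it.
 */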

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
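
/*
 * The summary appended by task_mem() looks like this (sizes are
 * illustrative only):
 *
 *	Mem:	  123456 bytes
 *	Slack:	    4096 bytes
 *	Shared:	   65536 bytes
 */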

char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		/* skip list entries that carry no VMA */
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1) {
			/* region is visible to more than one process */
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			/* slack: allocation rounding beyond the region's end */
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	/* account the mm_struct itself */
	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	/* fs, files and sighand are shared whenever their refcounts exceed 1 */
	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}
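
/*
 * Report the "virtual" size of the process: with no MMU this is the
 * sum of the real allocation sizes backing each mapped region.
 */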
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		/* only count entries that have a VMA attached */
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}
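
/*
 * Numbers for /proc/<pid>/statm: everything we can attribute to the
 * process counts as resident. Note that *shared is not filled in by
 * this variant.
 */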
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}
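
/*
 * Resolve the /proc/<pid>/exe link: find the first mapping that is both
 * executable and file-backed, then take references on its vfsmount and
 * dentry for the caller.
 */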
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);
	int result = -ENOENT;

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* find the first executable, file-backed mapping */
	vml = mm->context.vmlist;
	vma = NULL;
	while (vml) {
		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
			vma = vml->vma;
			break;
		}
		vml = vml->next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	put_task_struct(task);
	return result;
}

/*
 * Albert D. Cahalan suggested faking entries for the traditional
 * sections here. This might be worth investigating.
 */
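/*
 * With no MMU there is nothing useful to emit per mapping yet, so
 * /proc/<pid>/maps currently reads as an empty file: m_start() returns
 * NULL and the seq_file iterator terminates immediately.
 */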
static int show_map(struct seq_file *m, void *v)
{
	return 0;
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	return NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

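/*
 * seq_open() allocates the seq_file state and stores it in
 * file->private_data; we start with no iterator-private data attached.
 */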
static int maps_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &proc_pid_maps_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = NULL;
	}
	return ret;
}

struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};