/* fs/proc/task_nommu.c */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process: "shared" and
 * "non-shared".  Shared memory may get counted more than once, for
 * each process that owns it.  Non-shared memory is counted
 * accurately.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}
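
/*
 * For illustration only: given the format string above, the text written
 * into the buffer looks roughly like the following (the byte counts are
 * made-up example values, not taken from a real system):
 *
 *	Mem:	  123456 bytes
 *	Slack:	    4096 bytes
 *	Shared:	   65536 bytes
 */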

unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}

int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);
	int result = -ENOENT;

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vml = mm->context.vmlist;
	vma = NULL;
	while (vml) {
		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
			vma = vml->vma;
			break;
		}
		vml = vml->next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * Albert D. Cahalan suggested faking entries for the traditional
 * sections here.  This might be worth investigating.
 */
static int show_map(struct seq_file *m, void *v)
{
	return 0;
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	return NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}

struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
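
/*
 * Example only, not part of this file: the /proc plumbing (typically in
 * fs/proc/base.c) would expose the table above through seq_open(), roughly
 * as sketched below.  The names "maps_open" and "proc_maps_operations" are
 * illustrative; the real hookup may differ.
 */
#if 0
static int maps_open(struct inode *inode, struct file *file)
{
	/* attach proc_pid_maps_op; reads then walk start/next/show/stop */
	return seq_open(file, &proc_pid_maps_op);
}

static struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif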