#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

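/*
 * Build the "VmSize:/VmLck:/VmRSS:/..." block shown in
 * /proc/<pid>/status.  The mm_struct counters are kept in pages,
 * so each value is shifted by (PAGE_SHIFT-10) to convert to kB.
 */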
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		(mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		get_mm_counter(mm, rss) << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	int rss = get_mm_counter(mm, rss);

	*shared = rss - get_mm_counter(mm, anon_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = rss;
	return mm->total_vm;
}

int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);
	/* The executable is the file-backed vma flagged VM_EXECUTABLE. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

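/*
 * Pad the output so that vma names printed by show_map_internal()
 * start in a fixed column, whatever the length of the preceding
 * address/permission/device fields (len comes from the %n above).
 */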
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

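/*
 * Totals accumulated by the smaps page-table walk below.  A resident
 * page is classified as shared when its refcount is at least 2 and
 * private otherwise; each class is split clean/dirty from the pte
 * dirty bit.
 */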
struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

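/*
 * Emit one /proc/<pid>/maps line for a vma, e.g. (values illustrative):
 *
 *   08048000-0804c000 r-xp 00000000 03:01 12345     /bin/cat
 *
 * followed, when mss is non-NULL (the smaps case), by the size
 * breakdown accumulated by the page-table walk.
 */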
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else if (mm) {
		if (vma->vm_start <= mm->start_brk &&
		    vma->vm_end >= mm->brk) {
			pad_len_spaces(m, len);
			seq_puts(m, "[heap]");
		} else if (vma->vm_start <= mm->start_stack &&
			   vma->vm_end >= mm->start_stack) {
			pad_len_spaces(m, len);
			seq_puts(m, "[stack]");
		}
	} else {
		pad_len_spaces(m, len);
		seq_puts(m, "[vdso]");
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size) /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

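/*
 * Accumulate smaps statistics for the ptes of one pmd.  The caller
 * holds mm->page_table_lock; cond_resched_lock() lets us drop it
 * briefly when walking large mappings.
 */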
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	unsigned long pfn;
	struct page *page;

	pte = pte_offset_map(pmd, addr);
	do {
		ptent = *pte;
		if (pte_none(ptent) || !pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;
		pfn = pte_pfn(ptent);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (page_count(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	cond_resched_lock(&vma->vm_mm->page_table_lock);
}

static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}

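/*
 * For /proc/<pid>/smaps: walk all four page-table levels of the vma
 * under mm->page_table_lock, then print the totals together with the
 * usual maps line via show_map_internal().
 */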
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (mm) {
		spin_lock(&mm->page_table_lock);
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
		spin_unlock(&mm->page_table_lock);
	}
	return show_map_internal(m, v, &mss);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL) ? 0 : -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;

	if (vma && vma != get_gate_vma(task)) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = get_gate_vma(task);

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	m_stop(m, v);
	return (vma != tail_vma) ? tail_vma : NULL;
}

struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

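/*
 * Everything below implements /proc/<pid>/numa_maps and is compiled
 * only on NUMA kernels.
 */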
#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long mapped;
	unsigned long mapcount_max;
	unsigned long node[MAX_NUMNODES];
};

/*
 * Calculate the numa node maps for a vma: count how many pages are
 * resident, anonymous, and mapped, and on which node each page lives.
 */
static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
{
	struct page *page;
	unsigned long vaddr;
	struct mm_struct *mm = vma->vm_mm;
	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);

	if (!md)
		return NULL;
	memset(md, 0, sizeof(*md));

	spin_lock(&mm->page_table_lock);
	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		page = follow_page(mm, vaddr, 0);
		if (page) {
			int count = page_mapcount(page);

			if (count)
				md->mapped++;
			if (count > md->mapcount_max)
				md->mapcount_max = count;
			md->pages++;
			if (PageAnon(page))
				md->anon++;
			md->node[page_to_nid(page)]++;
		}
	}
	spin_unlock(&mm->page_table_lock);
	return md;
}

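/*
 * Print one /proc/<pid>/numa_maps line: start address, the vma's
 * mempolicy, the totals from get_numa_maps(), and per-node counts.
 */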
static int show_numa_map(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mempolicy *pol;
	struct numa_maps *md;
	struct zone **z;
	int n;
	int first;

	if (!vma->vm_mm)
		return 0;

	md = get_numa_maps(vma);
	if (!md)
		return 0;

	seq_printf(m, "%08lx", vma->vm_start);
	pol = get_vma_policy(task, vma, vma->vm_start);
	switch (pol->policy) {
	case MPOL_PREFERRED:
		seq_printf(m, " prefer=%d", pol->v.preferred_node);
		break;
	case MPOL_BIND:
		seq_printf(m, " bind={");
		first = 1;
		for (z = pol->v.zonelist->zones; *z; z++) {
			if (!first)
				seq_putc(m, ',');
			first = 0;
			seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
					(*z)->name);
		}
		seq_putc(m, '}');
		break;
	case MPOL_INTERLEAVE:
		seq_printf(m, " interleave={");
		first = 1;
		for_each_node(n) {
			if (test_bit(n, pol->v.nodes)) {
				if (!first)
					seq_putc(m, ',');
				first = 0;
				seq_printf(m, "%d", n);
			}
		}
		seq_putc(m, '}');
		break;
	default:
		seq_printf(m, " default");
		break;
	}
	seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
			md->mapcount_max, md->pages, md->mapped);
	if (md->anon)
		seq_printf(m, " Anon=%lu", md->anon);
	for_each_online_node(n) {
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
	}
	seq_putc(m, '\n');
	kfree(md);
	if (m->count < m->size) /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

#endif /* CONFIG_NUMA */