#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
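/*
 * Added note: the "<< (PAGE_SHIFT-10)" idiom used throughout converts
 * a page count to kB.  With 4K pages PAGE_SHIFT is 12, so each page
 * contributes 1 << 2 = 4 kB; e.g. total_vm = 25000 pages prints as
 * 25000 << 2 = 100000 kB.
 */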
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
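/*
 * Added note on units: task_mem() above reports kB for
 * /proc/<pid>/status, task_vsize() returns bytes, and task_statm()
 * below fills in raw page counts for /proc/<pid>/statm.
 */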
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The executable is the first file-backed VM_EXECUTABLE mapping. */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
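/*
 * Added note (an inference, not original commentary): the pad target
 * of 25 + sizeof(void*) * 6 appears chosen so the name column lines
 * up whether addresses print as 8 or 16 hex digits; the floor of 1
 * keeps seq_printf's "%*c" from receiving a non-positive width.
 */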
/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
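/*
 * Worked example (illustrative, not from the original source): with
 * 4K pages and PSS_SHIFT = 12, one page shared by three processes
 * adds (4096 << 12) / 3 = 5592405 to each sharer's pss counter, which
 * reads back as 5592405 >> 12 = 1365 bytes against the exact
 * 4096 / 3 = 1365.33, i.e. under a byte of truncation per page.
 */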
struct mem_size_stats
{
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	u64 pss;		/* fixed-point, see PSS_SHIFT above */
};
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);
	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:           %8lu kB\n"
			   "Rss:            %8lu kB\n"
			   "Pss:            %8lu kB\n"
			   "Shared_Clean:   %8lu kB\n"
			   "Shared_Dirty:   %8lu kB\n"
			   "Private_Clean:  %8lu kB\n"
			   "Private_Dirty:  %8lu kB\n"
			   "Referenced:     %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
			   mss->shared_clean  >> 10,
			   mss->shared_dirty  >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10,
			   mss->referenced >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
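/*
 * Added note: when the seq_file buffer overflows mid-record, seq_read
 * retries with a bigger buffer; stashing the last fully copied vma's
 * start address in m->version lets m_start() resume the walk via
 * find_vma() instead of rescanning the list from the head.
 */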
static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   void *private)
{
	struct mem_size_stats *mss = private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;
		mss->resident += PAGE_SIZE;
		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
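/*
 * Added note: page_mapcount() >= 2 is the sole test for "shared"
 * above; a page mapped by exactly one task counts as private even if
 * it lives in a shared file.  Private pages charge a full PAGE_SIZE
 * to pss, shared ones PAGE_SIZE / mapcount, in PSS_SHIFT fixed point.
 */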
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, void *private)
{
	struct vm_area_struct *vma = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;
		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;
		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
				&smaps_walk, &mss);
	return show_map_internal(m, v, &mss);
}
static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };

void clear_refs_smap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
			walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
					&clear_refs_walk, vma);
	flush_tlb_mm(mm);
	up_read(&mm->mmap_sem);
}
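/*
 * Added usage note: clear_refs_smap() backs writes to
 * /proc/<pid>/clear_refs; after "echo 1 > /proc/<pid>/clear_refs",
 * the Referenced field of /proc/<pid>/smaps counts only pages touched
 * since the write, a cheap way to estimate a working set.
 */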
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;
	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);	/* returns with mmap_sem held */
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}
	/*
	 * Check the vma index is within the range and do
	 * sequential scan until the requested index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}
	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */
out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
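/*
 * Added locking note: mm_for_maps() returns with mm->mmap_sem held
 * for read, so every vma handed out by m_start()/m_next() stays
 * stable across ->show; vma_stop() below drops the lock and the mm
 * reference once the walk moves past the last real vma (the gate vma
 * belongs to no mm, so it needs neither).
 */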
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
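/*
 * Added note: on success priv is not freed here; it becomes the
 * seq_file's private data, and the seq_release_private in each
 * file_operations below kfree()s it at close time.
 */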
static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return show_numa_map(m, v);
}

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif
static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};