2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
66 could replace all the switch()es with a mempolicy_ops structure.
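/*
 * Illustrative user-space sketch (not part of this file): how the policies
 * described above are typically requested through the set_mempolicy() and
 * mbind() system calls, here via the libnuma wrappers in <numaif.h>
 * (link with -lnuma). Node numbers and sizes are invented example values.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodes = 0x3;	// example mask: nodes 0 and 1
 *		unsigned long maxnode = sizeof(nodes) * 8;
 *		size_t len = 1 << 20;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// Interleave this task's future allocations over nodes 0-1.
 *		set_mempolicy(MPOL_INTERLEAVE, &nodes, maxnode);
 *
 *		// VMA policy: bind the (not yet faulted) range to nodes 0-1.
 *		mbind(buf, len, MPOL_BIND, &nodes, maxnode, 0);
 *		return 0;
 *	}
 */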
69 #include <linux/mempolicy.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/gfp.h>
79 #include <linux/slab.h>
80 #include <linux/string.h>
81 #include <linux/module.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
86 #include <linux/swap.h>
87 #include <linux/seq_file.h>
88 #include <linux/proc_fs.h>
89 #include <linux/migrate.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
93 #include <asm/tlbflush.h>
94 #include <asm/uaccess.h>
97 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
99 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
101 static struct kmem_cache *policy_cache;
102 static struct kmem_cache *sn_cache;
104 #define PDprintk(fmt...)
106 /* Highest zone. A specific allocation for a zone below that is not
108 enum zone_type policy_zone = ZONE_DMA;
110 struct mempolicy default_policy = {
111 .refcnt = ATOMIC_INIT(1), /* never free it */
112 .policy = MPOL_DEFAULT,
115 /* Do sanity checking on a policy */
116 static int mpol_check_policy(int mode, nodemask_t *nodes)
118 int empty = nodes_empty(*nodes);
126 case MPOL_INTERLEAVE:
127 /* Preferred will only use the first bit, but allow
133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
136 /* Generate a custom zonelist for the BIND policy. */
137 static struct zonelist *bind_zonelist(nodemask_t *nodes)
143 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
144 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
148 /* First put in the highest zones from all nodes, then all the next
149 lower zones etc. Avoid empty zones because the memory allocator
150 doesn't like them. If you implement node hot removal you
154 for_each_node_mask(nd, *nodes) {
155 struct zone *z = &NODE_DATA(nd)->node_zones[k];
156 if (z->present_pages > 0)
157 zl->zones[num++] = z;
163 zl->zones[num] = NULL;
167 /* Create a new policy */
168 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
170 struct mempolicy *policy;
172 PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
173 if (mode == MPOL_DEFAULT)
175 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
177 return ERR_PTR(-ENOMEM);
178 atomic_set(&policy->refcnt, 1);
180 case MPOL_INTERLEAVE:
181 policy->v.nodes = *nodes;
182 if (nodes_weight(*nodes) == 0) {
183 kmem_cache_free(policy_cache, policy);
184 return ERR_PTR(-EINVAL);
188 policy->v.preferred_node = first_node(*nodes);
189 if (policy->v.preferred_node >= MAX_NUMNODES)
190 policy->v.preferred_node = -1;
193 policy->v.zonelist = bind_zonelist(nodes);
194 if (policy->v.zonelist == NULL) {
195 kmem_cache_free(policy_cache, policy);
196 return ERR_PTR(-ENOMEM);
200 policy->policy = mode;
201 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
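/* Illustrative behaviour (node number invented): mpol_new(MPOL_PREFERRED,
   &mask) with only node 2 set returns a policy whose v.preferred_node is 2;
   an empty mask yields preferred_node = -1, i.e. "allocate locally". */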
205 static void gather_stats(struct page *, void *, int pte_dirty);
206 static void migrate_page_add(struct page *page, struct list_head *pagelist,
207 unsigned long flags);
209 /* Scan through pages checking if pages follow certain conditions. */
210 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
211 unsigned long addr, unsigned long end,
212 const nodemask_t *nodes, unsigned long flags,
219 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
224 if (!pte_present(*pte))
226 page = vm_normal_page(vma, addr, *pte);
230 * The check for PageReserved here is important to avoid
231 * handling zero pages and other pages that may have been
232 * marked special by the system.
234 * If PageReserved were not checked here then e.g.
235 * the location of the zero page could have an influence
236 * on MPOL_MF_STRICT, zero pages would be counted for
237 * the per node stats, and there would be useless attempts
238 * to put zero pages on the migration list.
240 if (PageReserved(page))
242 nid = page_to_nid(page);
243 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
246 if (flags & MPOL_MF_STATS)
247 gather_stats(page, private, pte_dirty(*pte));
248 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
249 migrate_page_add(page, private, flags);
252 } while (pte++, addr += PAGE_SIZE, addr != end);
253 pte_unmap_unlock(orig_pte, ptl);
257 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
258 unsigned long addr, unsigned long end,
259 const nodemask_t *nodes, unsigned long flags,
265 pmd = pmd_offset(pud, addr);
267 next = pmd_addr_end(addr, end);
268 if (pmd_none_or_clear_bad(pmd))
270 if (check_pte_range(vma, pmd, addr, next, nodes,
273 } while (pmd++, addr = next, addr != end);
277 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
278 unsigned long addr, unsigned long end,
279 const nodemask_t *nodes, unsigned long flags,
285 pud = pud_offset(pgd, addr);
287 next = pud_addr_end(addr, end);
288 if (pud_none_or_clear_bad(pud))
290 if (check_pmd_range(vma, pud, addr, next, nodes,
293 } while (pud++, addr = next, addr != end);
297 static inline int check_pgd_range(struct vm_area_struct *vma,
298 unsigned long addr, unsigned long end,
299 const nodemask_t *nodes, unsigned long flags,
305 pgd = pgd_offset(vma->vm_mm, addr);
307 next = pgd_addr_end(addr, end);
308 if (pgd_none_or_clear_bad(pgd))
310 if (check_pud_range(vma, pgd, addr, next, nodes,
313 } while (pgd++, addr = next, addr != end);
317 /* Check if a vma is migratable */
318 static inline int vma_migratable(struct vm_area_struct *vma)
320 if (vma->vm_flags & (
321 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
327 * Check if all pages in a range are on a set of nodes.
328 * If pagelist != NULL then isolate pages from the LRU and
329 * put them on the pagelist.
331 static struct vm_area_struct *
332 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
333 const nodemask_t *nodes, unsigned long flags, void *private)
336 struct vm_area_struct *first, *vma, *prev;
338 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
340 err = migrate_prep();
345 first = find_vma(mm, start);
347 return ERR_PTR(-EFAULT);
349 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
350 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
351 if (!vma->vm_next && vma->vm_end < end)
352 return ERR_PTR(-EFAULT);
353 if (prev && prev->vm_end < vma->vm_start)
354 return ERR_PTR(-EFAULT);
356 if (!is_vm_hugetlb_page(vma) &&
357 ((flags & MPOL_MF_STRICT) ||
358 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
359 vma_migratable(vma)))) {
360 unsigned long endvma = vma->vm_end;
364 if (vma->vm_start > start)
365 start = vma->vm_start;
366 err = check_pgd_range(vma, start, endvma, nodes,
369 first = ERR_PTR(err);
378 /* Apply policy to a single VMA */
379 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
382 struct mempolicy *old = vma->vm_policy;
384 PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
385 vma->vm_start, vma->vm_end, vma->vm_pgoff,
386 vma->vm_ops, vma->vm_file,
387 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
389 if (vma->vm_ops && vma->vm_ops->set_policy)
390 err = vma->vm_ops->set_policy(vma, new);
393 vma->vm_policy = new;
399 /* Step 2: apply policy to a range and do splits. */
400 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
401 unsigned long end, struct mempolicy *new)
403 struct vm_area_struct *next;
407 for (; vma && vma->vm_start < end; vma = next) {
409 if (vma->vm_start < start)
410 err = split_vma(vma->vm_mm, vma, start, 1);
411 if (!err && vma->vm_end > end)
412 err = split_vma(vma->vm_mm, vma, end, 0);
414 err = policy_vma(vma, new);
421 static int contextualize_policy(int mode, nodemask_t *nodes)
426 cpuset_update_task_memory_state();
427 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
429 return mpol_check_policy(mode, nodes);
434 * Update task->flags PF_MEMPOLICY bit: set iff non-default
435 * mempolicy. Allows more rapid checking of this (combined perhaps
436 * with other PF_* flag bits) on memory allocation hot code paths.
438 * If called from outside this file, the task 'p' should -only- be
439 * a newly forked child not yet visible on the task list, because
440 * manipulating the task flags of a visible task is not safe.
442 * The above limitation is why this routine has the funny name
443 * mpol_fix_fork_child_flag().
445 * It is also safe to call this with a task pointer of current,
446 * which the static wrapper mpol_set_task_struct_flag() does,
447 * for use within this file.
450 void mpol_fix_fork_child_flag(struct task_struct *p)
453 p->flags |= PF_MEMPOLICY;
455 p->flags &= ~PF_MEMPOLICY;
458 static void mpol_set_task_struct_flag(void)
460 mpol_fix_fork_child_flag(current);
463 /* Set the process memory policy */
464 long do_set_mempolicy(int mode, nodemask_t *nodes)
466 struct mempolicy *new;
468 if (contextualize_policy(mode, nodes))
470 new = mpol_new(mode, nodes);
473 mpol_free(current->mempolicy);
474 current->mempolicy = new;
475 mpol_set_task_struct_flag();
476 if (new && new->policy == MPOL_INTERLEAVE)
477 current->il_next = first_node(new->v.nodes);
481 /* Fill a zone bitmap for a policy */
482 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
489 for (i = 0; p->v.zonelist->zones[i]; i++)
490 node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
495 case MPOL_INTERLEAVE:
499 /* or use current node instead of online map? */
500 if (p->v.preferred_node < 0)
501 *nodes = node_online_map;
503 node_set(p->v.preferred_node, *nodes);
510 static int lookup_node(struct mm_struct *mm, unsigned long addr)
515 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
517 err = page_to_nid(p);
523 /* Retrieve NUMA policy */
524 long do_get_mempolicy(int *policy, nodemask_t *nmask,
525 unsigned long addr, unsigned long flags)
528 struct mm_struct *mm = current->mm;
529 struct vm_area_struct *vma = NULL;
530 struct mempolicy *pol = current->mempolicy;
532 cpuset_update_task_memory_state();
533 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
535 if (flags & MPOL_F_ADDR) {
536 down_read(&mm->mmap_sem);
537 vma = find_vma_intersection(mm, addr, addr+1);
539 up_read(&mm->mmap_sem);
542 if (vma->vm_ops && vma->vm_ops->get_policy)
543 pol = vma->vm_ops->get_policy(vma, addr);
545 pol = vma->vm_policy;
550 pol = &default_policy;
552 if (flags & MPOL_F_NODE) {
553 if (flags & MPOL_F_ADDR) {
554 err = lookup_node(mm, addr);
558 } else if (pol == current->mempolicy &&
559 pol->policy == MPOL_INTERLEAVE) {
560 *policy = current->il_next;
566 *policy = pol->policy;
569 up_read(&current->mm->mmap_sem);
575 get_zonemask(pol, nmask);
579 up_read(&current->mm->mmap_sem);
583 #ifdef CONFIG_MIGRATION
587 static void migrate_page_add(struct page *page, struct list_head *pagelist,
591 * Avoid migrating a page that is shared with others.
593 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
594 isolate_lru_page(page, pagelist);
597 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
599 return alloc_pages_node(node, GFP_HIGHUSER, 0);
603 * Migrate pages from one node to a target node.
604 * Returns error or the number of pages not migrated.
606 int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
613 node_set(source, nmask);
615 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
616 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
618 if (!list_empty(&pagelist))
619 err = migrate_pages(&pagelist, new_node_page, dest);
625 * Move pages between the two nodesets so as to preserve the physical
626 * layout as much as possible.
628 * Returns the number of pages that could not be moved.
630 int do_migrate_pages(struct mm_struct *mm,
631 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
638 down_read(&mm->mmap_sem);
640 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
645 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
646 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
647 * bit in 'tmp', and return that <source, dest> pair for migration.
648 * The pair of nodemasks 'to' and 'from' define the map.
650 * If no pair of bits is found that way, fallback to picking some
651 * pair of 'source' and 'dest' bits that are not the same. If the
652 * 'source' and 'dest' bits are the same, this represents a node
653 * that will be migrating to itself, so no pages need move.
655 * If no bits are left in 'tmp', or if all remaining bits left
656 * in 'tmp' correspond to the same bit in 'to', return false
657 * (nothing left to migrate).
659 * This lets us pick a pair of nodes to migrate between, such that
660 * if possible the dest node is not already occupied by some other
661 * source node, minimizing the risk of overloading the memory on a
662 * node that would happen if we migrated incoming memory to a node
663 * before migrating outgoing memory sourced from that same node.
665 * A single scan of tmp is sufficient. As we go, we remember the
666 * most recent <s, d> pair that moved (s != d). If we find a pair
667 * that not only moved, but what's better, moved to an empty slot
668 * (d is not set in tmp), then we break out then, with that pair.
669 * Otherwise when we finish scanning tmp, we at least have the
670 * most recent <s, d> pair that moved. If we get all the way through
671 * the scan of tmp without finding any node that moved, much less
672 * moved to an empty node, then there is nothing left worth migrating.
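 *
 * Illustrative example (node numbers invented): with from_nodes = {0,1}
 * and to_nodes = {1,2}, the scan maps s=0 -> d=1 and s=1 -> d=2.  Node 1
 * is still set in tmp, so <0,1> is only memorized, while <1,2> moves to
 * an empty slot and is taken first; node 1 is therefore drained to node 2
 * before node 0's pages are pushed onto node 1.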
676 while (!nodes_empty(tmp)) {
681 for_each_node_mask(s, tmp) {
682 d = node_remap(s, *from_nodes, *to_nodes);
686 source = s; /* Node moved. Memorize */
689 /* dest not in remaining from nodes? */
690 if (!node_isset(dest, tmp))
696 node_clear(source, tmp);
697 err = migrate_to_node(mm, source, dest, flags);
704 up_read(&mm->mmap_sem);
711 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
713 struct vm_area_struct *vma = (struct vm_area_struct *)private;
715 return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
719 static void migrate_page_add(struct page *page, struct list_head *pagelist,
724 int do_migrate_pages(struct mm_struct *mm,
725 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
730 static struct page *new_vma_page(struct page *page, unsigned long private)
736 long do_mbind(unsigned long start, unsigned long len,
737 unsigned long mode, nodemask_t *nmask, unsigned long flags)
739 struct vm_area_struct *vma;
740 struct mm_struct *mm = current->mm;
741 struct mempolicy *new;
746 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
747 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
750 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
753 if (start & ~PAGE_MASK)
756 if (mode == MPOL_DEFAULT)
757 flags &= ~MPOL_MF_STRICT;
759 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
767 if (mpol_check_policy(mode, nmask))
770 new = mpol_new(mode, nmask);
775 * If we are using the default policy then operation
776 * on discontinuous address spaces is okay after all
779 flags |= MPOL_MF_DISCONTIG_OK;
781 PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
782 mode,nodes_addr(nodes)[0]);
784 down_write(&mm->mmap_sem);
785 vma = check_range(mm, start, end, nmask,
786 flags | MPOL_MF_INVERT, &pagelist);
792 err = mbind_range(vma, start, end, new);
794 if (!list_empty(&pagelist))
795 nr_failed = migrate_pages(&pagelist, new_vma_page,
798 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
802 up_write(&mm->mmap_sem);
808 * User space interface with variable sized bitmaps for nodelists.
811 /* Copy a node mask from user space. */
812 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
813 unsigned long maxnode)
816 unsigned long nlongs;
817 unsigned long endmask;
821 if (maxnode == 0 || !nmask)
823 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
826 nlongs = BITS_TO_LONGS(maxnode);
827 if ((maxnode % BITS_PER_LONG) == 0)
830 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
832 /* When the user specified more nodes than supported just check
833 if the non supported part is all zero. */
834 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
835 if (nlongs > PAGE_SIZE/sizeof(long))
837 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
839 if (get_user(t, nmask + k))
841 if (k == nlongs - 1) {
847 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
851 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
853 nodes_addr(*nodes)[nlongs-1] &= endmask;
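/* Worked example (assuming 64-bit longs and MAX_NUMNODES = 64): a caller
   passing maxnode = 65 gives nlongs = 2 and endmask = 1, so the second
   user word may only contain clear bits (verified with get_user() above);
   nlongs is then clamped back to 1 and the whole first word is copied. */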
857 /* Copy a kernel node mask to user space */
858 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
861 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
862 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
865 if (copy > PAGE_SIZE)
867 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
871 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
874 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
876 unsigned long __user *nmask, unsigned long maxnode,
882 err = get_nodes(&nodes, nmask, maxnode);
885 return do_mbind(start, len, mode, &nodes, flags);
888 /* Set the process memory policy */
889 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
890 unsigned long maxnode)
895 if (mode < 0 || mode > MPOL_MAX)
897 err = get_nodes(&nodes, nmask, maxnode);
900 return do_set_mempolicy(mode, &nodes);
903 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
904 const unsigned long __user *old_nodes,
905 const unsigned long __user *new_nodes)
907 struct mm_struct *mm;
908 struct task_struct *task;
911 nodemask_t task_nodes;
914 err = get_nodes(&old, old_nodes, maxnode);
918 err = get_nodes(&new, new_nodes, maxnode);
922 /* Find the mm_struct */
923 read_lock(&tasklist_lock);
924 task = pid ? find_task_by_pid(pid) : current;
926 read_unlock(&tasklist_lock);
929 mm = get_task_mm(task);
930 read_unlock(&tasklist_lock);
936 * Check if this process has the right to modify the specified
937 * process. The right exists if the process has administrative
938 * capabilities, superuser privileges or the same
939 * userid as the target process.
941 if ((current->euid != task->suid) && (current->euid != task->uid) &&
942 (current->uid != task->suid) && (current->uid != task->uid) &&
943 !capable(CAP_SYS_NICE)) {
948 task_nodes = cpuset_mems_allowed(task);
949 /* Is the user allowed to access the target nodes? */
950 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
955 err = security_task_movememory(task);
959 err = do_migrate_pages(mm, &old, &new,
960 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
967 /* Retrieve NUMA policy */
968 asmlinkage long sys_get_mempolicy(int __user *policy,
969 unsigned long __user *nmask,
970 unsigned long maxnode,
971 unsigned long addr, unsigned long flags)
976 if (nmask != NULL && maxnode < MAX_NUMNODES)
979 err = do_get_mempolicy(&pval, &nodes, addr, flags);
984 if (policy && put_user(pval, policy))
988 err = copy_nodes_to_user(nmask, maxnode, &nodes);
995 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
996 compat_ulong_t __user *nmask,
997 compat_ulong_t maxnode,
998 compat_ulong_t addr, compat_ulong_t flags)
1001 unsigned long __user *nm = NULL;
1002 unsigned long nr_bits, alloc_size;
1003 DECLARE_BITMAP(bm, MAX_NUMNODES);
1005 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1006 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1009 nm = compat_alloc_user_space(alloc_size);
1011 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1013 if (!err && nmask) {
1014 err = copy_from_user(bm, nm, alloc_size);
1015 /* ensure entire bitmap is zeroed */
1016 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1017 err |= compat_put_bitmap(nmask, bm, nr_bits);
1023 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1024 compat_ulong_t maxnode)
1027 unsigned long __user *nm = NULL;
1028 unsigned long nr_bits, alloc_size;
1029 DECLARE_BITMAP(bm, MAX_NUMNODES);
1031 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1032 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1035 err = compat_get_bitmap(bm, nmask, nr_bits);
1036 nm = compat_alloc_user_space(alloc_size);
1037 err |= copy_to_user(nm, bm, alloc_size);
1043 return sys_set_mempolicy(mode, nm, nr_bits+1);
1046 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1047 compat_ulong_t mode, compat_ulong_t __user *nmask,
1048 compat_ulong_t maxnode, compat_ulong_t flags)
1051 unsigned long __user *nm = NULL;
1052 unsigned long nr_bits, alloc_size;
1055 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1056 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1059 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1060 nm = compat_alloc_user_space(alloc_size);
1061 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1067 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1072 /* Return effective policy for a VMA */
1073 static struct mempolicy * get_vma_policy(struct task_struct *task,
1074 struct vm_area_struct *vma, unsigned long addr)
1076 struct mempolicy *pol = task->mempolicy;
1079 if (vma->vm_ops && vma->vm_ops->get_policy)
1080 pol = vma->vm_ops->get_policy(vma, addr);
1081 else if (vma->vm_policy &&
1082 vma->vm_policy->policy != MPOL_DEFAULT)
1083 pol = vma->vm_policy;
1086 pol = &default_policy;
1090 /* Return a zonelist representing a mempolicy */
1091 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1095 switch (policy->policy) {
1096 case MPOL_PREFERRED:
1097 nd = policy->v.preferred_node;
1099 nd = numa_node_id();
1102 /* Lower zones don't get a policy applied */
1103 /* Careful: current->mems_allowed might have moved */
1104 if (gfp_zone(gfp) >= policy_zone)
1105 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1106 return policy->v.zonelist;
1108 case MPOL_INTERLEAVE: /* should not happen */
1110 nd = numa_node_id();
1116 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
1119 /* Do dynamic interleaving for a process */
1120 static unsigned interleave_nodes(struct mempolicy *policy)
1123 struct task_struct *me = current;
1126 next = next_node(nid, policy->v.nodes);
1127 if (next >= MAX_NUMNODES)
1128 next = first_node(policy->v.nodes);
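/* Example (mask invented): with v.nodes = {0,2,5} and il_next = 2 the
   caller is handed node 2 and il_next advances to 5; after node 5 the
   cursor wraps around to first_node(), i.e. back to node 0. */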
1134 * Depending on the memory policy provide a node from which to allocate the
1137 unsigned slab_node(struct mempolicy *policy)
1139 switch (policy->policy) {
1140 case MPOL_INTERLEAVE:
1141 return interleave_nodes(policy);
1145 * Follow bind policy behavior and start allocation at the
1148 return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
1150 case MPOL_PREFERRED:
1151 if (policy->v.preferred_node >= 0)
1152 return policy->v.preferred_node;
1156 return numa_node_id();
1160 /* Do static interleaving for a VMA with known offset. */
1161 static unsigned offset_il_node(struct mempolicy *pol,
1162 struct vm_area_struct *vma, unsigned long off)
1164 unsigned nnodes = nodes_weight(pol->v.nodes);
1165 unsigned target = (unsigned)off % nnodes;
1171 nid = next_node(nid, pol->v.nodes);
1173 } while (c <= target);
1177 /* Determine a node number for interleave */
1178 static inline unsigned interleave_nid(struct mempolicy *pol,
1179 struct vm_area_struct *vma, unsigned long addr, int shift)
1185 * for small pages, there is no difference between
1186 * shift and PAGE_SHIFT, so the bit-shift is safe.
1187 * for huge pages, since vm_pgoff is in units of small
1188 * pages, we need to shift off the always 0 bits to get
1191 BUG_ON(shift < PAGE_SHIFT);
1192 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1193 off += (addr - vma->vm_start) >> shift;
1194 return offset_il_node(pol, vma, off);
1196 return interleave_nodes(pol);
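/* Worked example (numbers invented): with 2MB huge pages (HPAGE_SHIFT = 21,
   PAGE_SHIFT = 12), a VMA with vm_pgoff = 512 faulting at vm_start + 4MB
   gives off = (512 >> 9) + (0x400000 >> 21) = 1 + 2 = 3, so the node is
   chosen as offset_il_node(pol, vma, 3).  Small pages pass PAGE_SHIFT and
   interleave per page offset instead. */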
1199 #ifdef CONFIG_HUGETLBFS
1200 /* Return a zonelist suitable for a huge page allocation. */
1201 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1203 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1205 if (pol->policy == MPOL_INTERLEAVE) {
1208 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1209 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
1211 return zonelist_policy(GFP_HIGHUSER, pol);
1215 /* Allocate a page in interleaved policy.
1216 Own path because it needs to do special accounting. */
1217 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1220 struct zonelist *zl;
1223 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1224 page = __alloc_pages(gfp, order, zl);
1225 if (page && page_zone(page) == zl->zones[0])
1226 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1231 * alloc_page_vma - Allocate a page for a VMA.
1234 * %GFP_USER user allocation.
1235 * %GFP_KERNEL kernel allocations,
1236 * %GFP_HIGHMEM highmem/user allocations,
1237 * %GFP_FS allocation should not call back into a file system.
1238 * %GFP_ATOMIC don't sleep.
1240 * @vma: Pointer to VMA or NULL if not available.
1241 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1243 * This function allocates a page from the kernel page pool and applies
1244 * a NUMA policy associated with the VMA or the current process.
1245 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1246 * mm_struct of the VMA to prevent it from going away. Should be used for
1247 * all allocations for pages that will be mapped into
1248 * user space. Returns NULL when no page can be allocated.
1250 * Should be called with the mmap_sem of the vma held.
1253 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1255 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1257 cpuset_update_task_memory_state();
1259 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1262 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1263 return alloc_page_interleave(gfp, 0, nid);
1265 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
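/* Illustrative call site (sketch of an anonymous fault path, mmap_sem
   already held for read; the gfp flags are an assumption, not taken from
   this file):

	page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, address);
	if (!page)
		goto oom;
*/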
1269 * alloc_pages_current - Allocate pages.
1272 * %GFP_USER user allocation,
1273 * %GFP_KERNEL kernel allocation,
1274 * %GFP_HIGHMEM highmem allocation,
1275 * %GFP_FS don't call back into a file system.
1276 * %GFP_ATOMIC don't sleep.
1277 * @order: Power of two of allocation size in pages. 0 is a single page.
1279 * Allocate a page from the kernel page pool. When not in
1280 * interrupt context, apply the current process' NUMA policy.
1281 * Returns NULL when no page can be allocated.
1283 * Don't call cpuset_update_task_memory_state() unless
1284 * 1) it's ok to take cpuset_sem (can WAIT), and
1285 * 2) allocating for current task (not interrupt).
1287 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1289 struct mempolicy *pol = current->mempolicy;
1291 if ((gfp & __GFP_WAIT) && !in_interrupt())
1292 cpuset_update_task_memory_state();
1293 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1294 pol = &default_policy;
1295 if (pol->policy == MPOL_INTERLEAVE)
1296 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1297 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1299 EXPORT_SYMBOL(alloc_pages_current);
1302 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1303 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1304 * with the mems_allowed returned by cpuset_mems_allowed(). This
1305 * keeps mempolicies cpuset relative after its cpuset moves. See
1306 * further kernel/cpuset.c update_nodemask().
1308 void *cpuset_being_rebound;
1310 /* Slow path of a mempolicy copy */
1311 struct mempolicy *__mpol_copy(struct mempolicy *old)
1313 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1316 return ERR_PTR(-ENOMEM);
1317 if (current_cpuset_is_being_rebound()) {
1318 nodemask_t mems = cpuset_mems_allowed(current);
1319 mpol_rebind_policy(old, &mems);
1322 atomic_set(&new->refcnt, 1);
1323 if (new->policy == MPOL_BIND) {
1324 int sz = ksize(old->v.zonelist);
1325 new->v.zonelist = kmalloc(sz, GFP_KERNEL);
1326 if (!new->v.zonelist) {
1327 kmem_cache_free(policy_cache, new);
1328 return ERR_PTR(-ENOMEM);
1330 memcpy(new->v.zonelist, old->v.zonelist, sz);
1335 /* Slow path of a mempolicy comparison */
1336 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1340 if (a->policy != b->policy)
1342 switch (a->policy) {
1345 case MPOL_INTERLEAVE:
1346 return nodes_equal(a->v.nodes, b->v.nodes);
1347 case MPOL_PREFERRED:
1348 return a->v.preferred_node == b->v.preferred_node;
1351 for (i = 0; a->v.zonelist->zones[i]; i++)
1352 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1354 return b->v.zonelist->zones[i] == NULL;
1362 /* Slow path of a mpol destructor. */
1363 void __mpol_free(struct mempolicy *p)
1365 if (!atomic_dec_and_test(&p->refcnt))
1367 if (p->policy == MPOL_BIND)
1368 kfree(p->v.zonelist);
1369 p->policy = MPOL_DEFAULT;
1370 kmem_cache_free(policy_cache, p);
1374 * Shared memory backing store policy support.
1376 * Remember policies even when nobody has shared memory mapped.
1377 * The policies are kept in Red-Black tree linked from the inode.
1378 * They are protected by the sp->lock spinlock, which should be held
1379 * for any accesses to the tree.
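/* Sketch of how tmpfs is expected to hook in (names as in mm/shmem.c of
   this era; treat as an assumption): the inode embeds a shared_policy and
   the vma's set_policy/get_policy callbacks forward to the helpers below.

	static int shmem_set_policy(struct vm_area_struct *vma,
				    struct mempolicy *new)
	{
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, new);
	}
*/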
1382 /* lookup first element intersecting start-end */
1383 /* Caller holds sp->lock */
1384 static struct sp_node *
1385 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1387 struct rb_node *n = sp->root.rb_node;
1390 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1392 if (start >= p->end)
1394 else if (end <= p->start)
1402 struct sp_node *w = NULL;
1403 struct rb_node *prev = rb_prev(n);
1406 w = rb_entry(prev, struct sp_node, nd);
1407 if (w->end <= start)
1411 return rb_entry(n, struct sp_node, nd);
1414 /* Insert a new shared policy into the list. */
1415 /* Caller holds sp->lock */
1416 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1418 struct rb_node **p = &sp->root.rb_node;
1419 struct rb_node *parent = NULL;
1424 nd = rb_entry(parent, struct sp_node, nd);
1425 if (new->start < nd->start)
1427 else if (new->end > nd->end)
1428 p = &(*p)->rb_right;
1432 rb_link_node(&new->nd, parent, p);
1433 rb_insert_color(&new->nd, &sp->root);
1434 PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
1435 new->policy ? new->policy->policy : 0);
1438 /* Find shared policy intersecting idx */
1440 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1442 struct mempolicy *pol = NULL;
1445 if (!sp->root.rb_node)
1447 spin_lock(&sp->lock);
1448 sn = sp_lookup(sp, idx, idx+1);
1450 mpol_get(sn->policy);
1453 spin_unlock(&sp->lock);
1457 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1459 PDprintk("deleting %lx-l%x\n", n->start, n->end);
1460 rb_erase(&n->nd, &sp->root);
1461 mpol_free(n->policy);
1462 kmem_cache_free(sn_cache, n);
1466 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
1468 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1479 /* Replace a policy range. */
1480 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1481 unsigned long end, struct sp_node *new)
1483 struct sp_node *n, *new2 = NULL;
1486 spin_lock(&sp->lock);
1487 n = sp_lookup(sp, start, end);
1488 /* Take care of old policies in the same range. */
1489 while (n && n->start < end) {
1490 struct rb_node *next = rb_next(&n->nd);
1491 if (n->start >= start) {
1497 /* Old policy spanning whole new range. */
1500 spin_unlock(&sp->lock);
1501 new2 = sp_alloc(end, n->end, n->policy);
1507 sp_insert(sp, new2);
1515 n = rb_entry(next, struct sp_node, nd);
1519 spin_unlock(&sp->lock);
1521 mpol_free(new2->policy);
1522 kmem_cache_free(sn_cache, new2);
1527 void mpol_shared_policy_init(struct shared_policy *info, int policy,
1528 nodemask_t *policy_nodes)
1530 info->root = RB_ROOT;
1531 spin_lock_init(&info->lock);
1533 if (policy != MPOL_DEFAULT) {
1534 struct mempolicy *newpol;
1536 /* Falls back to MPOL_DEFAULT on any error */
1537 newpol = mpol_new(policy, policy_nodes);
1538 if (!IS_ERR(newpol)) {
1539 /* Create pseudo-vma that contains just the policy */
1540 struct vm_area_struct pvma;
1542 memset(&pvma, 0, sizeof(struct vm_area_struct));
1543 /* Policy covers entire file */
1544 pvma.vm_end = TASK_SIZE;
1545 mpol_set_shared_policy(info, &pvma, newpol);
1551 int mpol_set_shared_policy(struct shared_policy *info,
1552 struct vm_area_struct *vma, struct mempolicy *npol)
1555 struct sp_node *new = NULL;
1556 unsigned long sz = vma_pages(vma);
1558 PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
1560 sz, npol? npol->policy : -1,
1561 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1564 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1568 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1570 kmem_cache_free(sn_cache, new);
1574 /* Free a backing policy store on inode delete. */
1575 void mpol_free_shared_policy(struct shared_policy *p)
1578 struct rb_node *next;
1580 if (!p->root.rb_node)
1582 spin_lock(&p->lock);
1583 next = rb_first(&p->root);
1585 n = rb_entry(next, struct sp_node, nd);
1586 next = rb_next(&n->nd);
1587 rb_erase(&n->nd, &p->root);
1588 mpol_free(n->policy);
1589 kmem_cache_free(sn_cache, n);
1591 spin_unlock(&p->lock);
1594 /* assumes fs == KERNEL_DS */
1595 void __init numa_policy_init(void)
1597 policy_cache = kmem_cache_create("numa_policy",
1598 sizeof(struct mempolicy),
1599 0, SLAB_PANIC, NULL, NULL);
1601 sn_cache = kmem_cache_create("shared_policy_node",
1602 sizeof(struct sp_node),
1603 0, SLAB_PANIC, NULL, NULL);
1605 /* Set interleaving policy for system init. This way not all
1606 the data structures allocated at system boot end up in node zero. */
1608 if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
1609 printk("numa_policy_init: interleaving failed\n");
1612 /* Reset policy of current process to default */
1613 void numa_default_policy(void)
1615 do_set_mempolicy(MPOL_DEFAULT, NULL);
1618 /* Migrate a policy to a different set of nodes */
1619 void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1621 nodemask_t *mpolmask;
1626 mpolmask = &pol->cpuset_mems_allowed;
1627 if (nodes_equal(*mpolmask, *newmask))
1630 switch (pol->policy) {
1633 case MPOL_INTERLEAVE:
1634 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1636 *mpolmask = *newmask;
1637 current->il_next = node_remap(current->il_next,
1638 *mpolmask, *newmask);
1640 case MPOL_PREFERRED:
1641 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1642 *mpolmask, *newmask);
1643 *mpolmask = *newmask;
1648 struct zonelist *zonelist;
1651 for (z = pol->v.zonelist->zones; *z; z++)
1652 node_set((*z)->zone_pgdat->node_id, nodes);
1653 nodes_remap(tmp, nodes, *mpolmask, *newmask);
1656 zonelist = bind_zonelist(&nodes);
1658 /* If no mem, then zonelist is NULL and we keep old zonelist.
1659 * If that old zonelist has no remaining mems_allowed nodes,
1660 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1664 /* Good - got mem - substitute new zonelist */
1665 kfree(pol->v.zonelist);
1666 pol->v.zonelist = zonelist;
1668 *mpolmask = *newmask;
1678 * Wrapper for mpol_rebind_policy() that just requires task
1679 * pointer, and updates task mempolicy.
1682 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1684 mpol_rebind_policy(tsk->mempolicy, new);
1688 * Rebind each vma in mm to new nodemask.
1690 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1693 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1695 struct vm_area_struct *vma;
1697 down_write(&mm->mmap_sem);
1698 for (vma = mm->mmap; vma; vma = vma->vm_next)
1699 mpol_rebind_policy(vma->vm_policy, new);
1700 up_write(&mm->mmap_sem);
1704 * Display pages allocated per node and memory policy via /proc.
1707 static const char *policy_types[] = { "default", "prefer", "bind",
1711 * Convert a mempolicy into a string.
1712 * Returns the number of characters in buffer (if positive)
1713 * or an error (negative)
1715 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1720 int mode = pol ? pol->policy : MPOL_DEFAULT;
1727 case MPOL_PREFERRED:
1729 node_set(pol->v.preferred_node, nodes);
1733 get_zonemask(pol, &nodes);
1736 case MPOL_INTERLEAVE:
1737 nodes = pol->v.nodes;
1745 l = strlen(policy_types[mode]);
1746 if (buffer + maxlen < p + l + 1)
1749 strcpy(p, policy_types[mode]);
1752 if (!nodes_empty(nodes)) {
1753 if (buffer + maxlen < p + 2)
1756 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
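/* Illustrative results: "default", "prefer=1", "bind=0-1" or
   "interleave=0-3", i.e. the policy name followed by '=' and a node list
   whenever the node mask is non-empty (node numbers invented). */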
1762 unsigned long pages;
1764 unsigned long active;
1765 unsigned long writeback;
1766 unsigned long mapcount_max;
1767 unsigned long dirty;
1768 unsigned long swapcache;
1769 unsigned long node[MAX_NUMNODES];
1772 static void gather_stats(struct page *page, void *private, int pte_dirty)
1774 struct numa_maps *md = private;
1775 int count = page_mapcount(page);
1778 if (pte_dirty || PageDirty(page))
1781 if (PageSwapCache(page))
1784 if (PageActive(page))
1787 if (PageWriteback(page))
1793 if (count > md->mapcount_max)
1794 md->mapcount_max = count;
1796 md->node[page_to_nid(page)]++;
1799 #ifdef CONFIG_HUGETLB_PAGE
1800 static void check_huge_range(struct vm_area_struct *vma,
1801 unsigned long start, unsigned long end,
1802 struct numa_maps *md)
1807 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1808 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1818 page = pte_page(pte);
1822 gather_stats(page, md, pte_dirty(*ptep));
1826 static inline void check_huge_range(struct vm_area_struct *vma,
1827 unsigned long start, unsigned long end,
1828 struct numa_maps *md)
1833 int show_numa_map(struct seq_file *m, void *v)
1835 struct proc_maps_private *priv = m->private;
1836 struct vm_area_struct *vma = v;
1837 struct numa_maps *md;
1838 struct file *file = vma->vm_file;
1839 struct mm_struct *mm = vma->vm_mm;
1846 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1850 mpol_to_str(buffer, sizeof(buffer),
1851 get_vma_policy(priv->task, vma, vma->vm_start));
1853 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1856 seq_printf(m, " file=");
1857 seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
1858 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1859 seq_printf(m, " heap");
1860 } else if (vma->vm_start <= mm->start_stack &&
1861 vma->vm_end >= mm->start_stack) {
1862 seq_printf(m, " stack");
1865 if (is_vm_hugetlb_page(vma)) {
1866 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1867 seq_printf(m, " huge");
1869 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1870 &node_online_map, MPOL_MF_STATS, md);
1877 seq_printf(m," anon=%lu",md->anon);
1880 seq_printf(m," dirty=%lu",md->dirty);
1882 if (md->pages != md->anon && md->pages != md->dirty)
1883 seq_printf(m, " mapped=%lu", md->pages);
1885 if (md->mapcount_max > 1)
1886 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1889 seq_printf(m," swapcache=%lu", md->swapcache);
1891 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1892 seq_printf(m," active=%lu", md->active);
1895 seq_printf(m," writeback=%lu", md->writeback);
1897 for_each_online_node(n)
1899 seq_printf(m, " N%d=%lu", n, md->node[n]);
1904 if (m->count < m->size)
1905 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
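/*
 * Illustrative /proc/<pid>/numa_maps line produced by the code above
 * (address, file name and counts invented):
 *
 *   2aaaaac00000 interleave=0-3 file=/usr/lib/libfoo.so mapped=96 mapmax=42 N0=24 N1=24 N2=24 N3=24
 *
 * i.e. the VMA start address, the effective policy string, an optional
 * file/heap/stack/huge tag, the per-VMA counters and per-node page counts.
 */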