2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should be allocated.
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter is used.
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default process policy.
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 could replace all the switch()es with a mempolicy_ops structure.
67 */
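/*
 * Illustrative userspace sketch (not part of this file): tasks normally
 * select these policies with the set_mempolicy() and mbind() system calls.
 * The example assumes the <numaif.h> wrappers shipped with libnuma; the
 * raw syscalls could be used directly instead.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *		// interleave this task's future allocations over nodes 0 and 1
 *		if (set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8))
 *			perror("set_mempolicy");
 *		return 0;
 *	}
 */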
69 #include <linux/mempolicy.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/gfp.h>
79 #include <linux/slab.h>
80 #include <linux/string.h>
81 #include <linux/module.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
86 #include <linux/swap.h>
87 #include <linux/seq_file.h>
88 #include <linux/proc_fs.h>
90 #include <asm/tlbflush.h>
91 #include <asm/uaccess.h>
94 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
95 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
96 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
98 static kmem_cache_t *policy_cache;
99 static kmem_cache_t *sn_cache;
101 #define PDprintk(fmt...)
103 /* Highest zone. A specific allocation for a zone below that is not policied. */
105 int policy_zone = ZONE_DMA;
107 struct mempolicy default_policy = {
108 .refcnt = ATOMIC_INIT(1), /* never free it */
109 .policy = MPOL_DEFAULT,
112 /* Do sanity checking on a policy */
113 static int mpol_check_policy(int mode, nodemask_t *nodes)
115 int empty = nodes_empty(*nodes);
123 case MPOL_INTERLEAVE:
124 /* Preferred will only use the first bit, but allow more for now. */
130 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
132 /* Generate a custom zonelist for the BIND policy. */
133 static struct zonelist *bind_zonelist(nodemask_t *nodes)
138 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
139 zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
143 for_each_node_mask(nd, *nodes)
144 zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
145 zl->zones[num] = NULL;
149 /* Create a new policy */
150 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
152 struct mempolicy *policy;
154 PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
155 if (mode == MPOL_DEFAULT)
157 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
159 return ERR_PTR(-ENOMEM);
160 atomic_set(&policy->refcnt, 1);
162 case MPOL_INTERLEAVE:
163 policy->v.nodes = *nodes;
164 if (nodes_weight(*nodes) == 0) {
165 kmem_cache_free(policy_cache, policy);
166 return ERR_PTR(-EINVAL);
170 policy->v.preferred_node = first_node(*nodes);
171 if (policy->v.preferred_node >= MAX_NUMNODES)
172 policy->v.preferred_node = -1;
175 policy->v.zonelist = bind_zonelist(nodes);
176 if (policy->v.zonelist == NULL) {
177 kmem_cache_free(policy_cache, policy);
178 return ERR_PTR(-ENOMEM);
182 policy->policy = mode;
186 static void gather_stats(struct page *, void *);
187 static void migrate_page_add(struct vm_area_struct *vma,
188 struct page *page, struct list_head *pagelist, unsigned long flags);
190 /* Scan through pages checking if pages follow certain conditions. */
191 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
192 unsigned long addr, unsigned long end,
193 const nodemask_t *nodes, unsigned long flags,
200 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
205 if (!pte_present(*pte))
207 page = vm_normal_page(vma, addr, *pte);
210 nid = page_to_nid(page);
211 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
214 if (flags & MPOL_MF_STATS)
215 gather_stats(page, private);
216 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
218 migrate_page_add(vma, page, private, flags);
223 } while (pte++, addr += PAGE_SIZE, addr != end);
224 pte_unmap_unlock(orig_pte, ptl);
228 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
229 unsigned long addr, unsigned long end,
230 const nodemask_t *nodes, unsigned long flags,
236 pmd = pmd_offset(pud, addr);
238 next = pmd_addr_end(addr, end);
239 if (pmd_none_or_clear_bad(pmd))
241 if (check_pte_range(vma, pmd, addr, next, nodes,
244 } while (pmd++, addr = next, addr != end);
248 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
249 unsigned long addr, unsigned long end,
250 const nodemask_t *nodes, unsigned long flags,
256 pud = pud_offset(pgd, addr);
258 next = pud_addr_end(addr, end);
259 if (pud_none_or_clear_bad(pud))
261 if (check_pmd_range(vma, pud, addr, next, nodes,
264 } while (pud++, addr = next, addr != end);
268 static inline int check_pgd_range(struct vm_area_struct *vma,
269 unsigned long addr, unsigned long end,
270 const nodemask_t *nodes, unsigned long flags,
276 pgd = pgd_offset(vma->vm_mm, addr);
278 next = pgd_addr_end(addr, end);
279 if (pgd_none_or_clear_bad(pgd))
281 if (check_pud_range(vma, pgd, addr, next, nodes,
284 } while (pgd++, addr = next, addr != end);
288 /* Check if a vma is migratable */
289 static inline int vma_migratable(struct vm_area_struct *vma)
291 if (vma->vm_flags & (
292 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP))
298 * Check if all pages in a range are on a set of nodes.
299 * If pagelist != NULL then isolate pages from the LRU and
300 * put them on the pagelist.
302 static struct vm_area_struct *
303 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
304 const nodemask_t *nodes, unsigned long flags, void *private)
307 struct vm_area_struct *first, *vma, *prev;
309 first = find_vma(mm, start);
311 return ERR_PTR(-EFAULT);
313 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
314 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
315 if (!vma->vm_next && vma->vm_end < end)
316 return ERR_PTR(-EFAULT);
317 if (prev && prev->vm_end < vma->vm_start)
318 return ERR_PTR(-EFAULT);
320 if (!is_vm_hugetlb_page(vma) &&
321 ((flags & MPOL_MF_STRICT) ||
322 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
323 vma_migratable(vma)))) {
324 unsigned long endvma = vma->vm_end;
328 if (vma->vm_start > start)
329 start = vma->vm_start;
330 err = check_pgd_range(vma, start, endvma, nodes,
333 first = ERR_PTR(err);
342 /* Apply policy to a single VMA */
343 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
346 struct mempolicy *old = vma->vm_policy;
348 PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
349 vma->vm_start, vma->vm_end, vma->vm_pgoff,
350 vma->vm_ops, vma->vm_file,
351 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
353 if (vma->vm_ops && vma->vm_ops->set_policy)
354 err = vma->vm_ops->set_policy(vma, new);
357 vma->vm_policy = new;
363 /* Step 2: apply policy to a range and do splits. */
364 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
365 unsigned long end, struct mempolicy *new)
367 struct vm_area_struct *next;
371 for (; vma && vma->vm_start < end; vma = next) {
373 if (vma->vm_start < start)
374 err = split_vma(vma->vm_mm, vma, start, 1);
375 if (!err && vma->vm_end > end)
376 err = split_vma(vma->vm_mm, vma, end, 0);
378 err = policy_vma(vma, new);
385 static int contextualize_policy(int mode, nodemask_t *nodes)
390 cpuset_update_task_memory_state();
391 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
393 return mpol_check_policy(mode, nodes);
396 /* Set the process memory policy */
397 long do_set_mempolicy(int mode, nodemask_t *nodes)
399 struct mempolicy *new;
401 if (contextualize_policy(mode, nodes))
403 new = mpol_new(mode, nodes);
406 mpol_free(current->mempolicy);
407 current->mempolicy = new;
408 if (new && new->policy == MPOL_INTERLEAVE)
409 current->il_next = first_node(new->v.nodes);
413 /* Fill a zone bitmap for a policy */
414 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
421 for (i = 0; p->v.zonelist->zones[i]; i++)
422 node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
427 case MPOL_INTERLEAVE:
431 /* or use current node instead of online map? */
432 if (p->v.preferred_node < 0)
433 *nodes = node_online_map;
435 node_set(p->v.preferred_node, *nodes);
442 static int lookup_node(struct mm_struct *mm, unsigned long addr)
447 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
449 err = page_to_nid(p);
455 /* Retrieve NUMA policy */
456 long do_get_mempolicy(int *policy, nodemask_t *nmask,
457 unsigned long addr, unsigned long flags)
460 struct mm_struct *mm = current->mm;
461 struct vm_area_struct *vma = NULL;
462 struct mempolicy *pol = current->mempolicy;
464 cpuset_update_task_memory_state();
465 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
467 if (flags & MPOL_F_ADDR) {
468 down_read(&mm->mmap_sem);
469 vma = find_vma_intersection(mm, addr, addr+1);
471 up_read(&mm->mmap_sem);
474 if (vma->vm_ops && vma->vm_ops->get_policy)
475 pol = vma->vm_ops->get_policy(vma, addr);
477 pol = vma->vm_policy;
482 pol = &default_policy;
484 if (flags & MPOL_F_NODE) {
485 if (flags & MPOL_F_ADDR) {
486 err = lookup_node(mm, addr);
490 } else if (pol == current->mempolicy &&
491 pol->policy == MPOL_INTERLEAVE) {
492 *policy = current->il_next;
498 *policy = pol->policy;
501 up_read(&current->mm->mmap_sem);
507 get_zonemask(pol, nmask);
511 up_read(&current->mm->mmap_sem);
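/*
 * Illustrative userspace sketch (assumes the <numaif.h> wrapper): the
 * MPOL_F_NODE|MPOL_F_ADDR path above is what lets a program ask which
 * node currently backs an address it has already touched:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int node = -1;	// addr is some faulted-in address in this process
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */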
519 /* Check if we are the only process mapping the page in question */
520 static inline int single_mm_mapping(struct mm_struct *mm,
521 struct address_space *mapping)
523 struct vm_area_struct *vma;
524 struct prio_tree_iter iter;
527 spin_lock(&mapping->i_mmap_lock);
528 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
529 if (mm != vma->vm_mm) {
533 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
534 if (mm != vma->vm_mm) {
539 spin_unlock(&mapping->i_mmap_lock);
544 * Add a page to be migrated to the pagelist
546 static void migrate_page_add(struct vm_area_struct *vma,
547 struct page *page, struct list_head *pagelist, unsigned long flags)
550 * Avoid migrating a page that is shared by others and not writable.
552 if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
553 mapping_writably_mapped(page->mapping) ||
554 single_mm_mapping(vma->vm_mm, page->mapping)) {
555 int rc = isolate_lru_page(page);
558 list_add(&page->lru, pagelist);
560 * If the isolate attempt was not successful then we just
561 * encountered an unswappable page. Something must be wrong.
567 static int swap_pages(struct list_head *pagelist)
573 n = migrate_pages(pagelist, NULL, &moved, &failed);
574 putback_lru_pages(&failed);
575 putback_lru_pages(&moved);
581 * For now migrate_pages simply swaps out the pages from nodes that are in
582 * the source set but not in the target set. In the future, we would
583 * want a function that moves pages between the two nodesets in such
584 * a way as to preserve the physical layout as much as possible.
586 * Returns the number of pages that could not be moved.
588 int do_migrate_pages(struct mm_struct *mm,
589 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
595 nodes_andnot(nodes, *from_nodes, *to_nodes);
597 down_read(&mm->mmap_sem);
598 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
599 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
601 if (!list_empty(&pagelist)) {
602 count = swap_pages(&pagelist);
603 putback_lru_pages(&pagelist);
606 up_read(&mm->mmap_sem);
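/*
 * Illustrative userspace sketch (assumes a <numaif.h> migrate_pages()
 * wrapper; pid is a hypothetical target process): move that process'
 * pages from node 0 to node 1.  The return value mirrors
 * do_migrate_pages(): the number of pages that could not be moved,
 * or a negative errno.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(from) * 8, &from, &to);
 */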
610 long do_mbind(unsigned long start, unsigned long len,
611 unsigned long mode, nodemask_t *nmask, unsigned long flags)
613 struct vm_area_struct *vma;
614 struct mm_struct *mm = current->mm;
615 struct mempolicy *new;
620 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
621 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
624 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
627 if (start & ~PAGE_MASK)
630 if (mode == MPOL_DEFAULT)
631 flags &= ~MPOL_MF_STRICT;
633 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
641 if (mpol_check_policy(mode, nmask))
644 new = mpol_new(mode, nmask);
649 * If we are using the default policy then operation
650 * on discontinuous address spaces is okay after all
653 flags |= MPOL_MF_DISCONTIG_OK;
655 PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
656 mode,nodes_addr(nodes)[0]);
658 down_write(&mm->mmap_sem);
659 vma = check_range(mm, start, end, nmask,
660 flags | MPOL_MF_INVERT, &pagelist);
666 err = mbind_range(vma, start, end, new);
667 if (!list_empty(&pagelist))
668 nr_failed = swap_pages(&pagelist);
670 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
673 if (!list_empty(&pagelist))
674 putback_lru_pages(&pagelist);
676 up_write(&mm->mmap_sem);
682 * User space interface with variable sized bitmaps for nodelists.
685 /* Copy a node mask from user space. */
686 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
687 unsigned long maxnode)
690 unsigned long nlongs;
691 unsigned long endmask;
695 if (maxnode == 0 || !nmask)
698 nlongs = BITS_TO_LONGS(maxnode);
699 if ((maxnode % BITS_PER_LONG) == 0)
702 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
704 /* When the user specified more nodes than supported just check
705 if the non supported part is all zero. */
706 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
707 if (nlongs > PAGE_SIZE/sizeof(long))
709 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
711 if (get_user(t, nmask + k))
713 if (k == nlongs - 1) {
719 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
723 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
725 nodes_addr(*nodes)[nlongs-1] &= endmask;
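/*
 * Worked example of the arithmetic above (illustrative): for a caller
 * passing maxnode == 5, nlongs == BITS_TO_LONGS(5) == 1 and
 * endmask == (1UL << 5) - 1 == 0x1f, so only node bits 0-4 survive the
 * final AND.  A caller compiled with more nodes than this kernel supports
 * merely gets its excess longs checked for being zero.
 */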
729 /* Copy a kernel node mask to user space */
730 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
733 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
734 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
737 if (copy > PAGE_SIZE)
739 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
743 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
746 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
748 unsigned long __user *nmask, unsigned long maxnode,
754 err = get_nodes(&nodes, nmask, maxnode);
757 return do_mbind(start, len, mode, &nodes, flags);
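/*
 * Illustrative userspace sketch (assumes the <numaif.h> and <sys/mman.h>
 * interfaces): bind a fresh anonymous mapping to node 0, moving any pages
 * that already landed elsewhere and failing if that is not possible.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	size_t len = 1UL << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask = 1UL << 0;
 *	if (p != MAP_FAILED &&
 *	    mbind(p, len, MPOL_BIND, &mask, sizeof(mask) * 8,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */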
760 /* Set the process memory policy */
761 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
762 unsigned long maxnode)
767 if (mode < 0 || mode > MPOL_MAX)
769 err = get_nodes(&nodes, nmask, maxnode);
772 return do_set_mempolicy(mode, &nodes);
775 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
776 const unsigned long __user *old_nodes,
777 const unsigned long __user *new_nodes)
779 struct mm_struct *mm;
780 struct task_struct *task;
783 nodemask_t task_nodes;
786 err = get_nodes(&old, old_nodes, maxnode);
790 err = get_nodes(&new, new_nodes, maxnode);
794 /* Find the mm_struct */
795 read_lock(&tasklist_lock);
796 task = pid ? find_task_by_pid(pid) : current;
798 read_unlock(&tasklist_lock);
801 mm = get_task_mm(task);
802 read_unlock(&tasklist_lock);
808 * Check if this process has the right to modify the specified
809 * process. The right exists if the process has administrative
810 * capabilities, superuser privileges or the same
811 * userid as the target process.
813 if ((current->euid != task->suid) && (current->euid != task->uid) &&
814 (current->uid != task->suid) && (current->uid != task->uid) &&
815 !capable(CAP_SYS_ADMIN)) {
820 task_nodes = cpuset_mems_allowed(task);
821 /* Is the user allowed to access the target nodes? */
822 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
827 err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE);
834 /* Retrieve NUMA policy */
835 asmlinkage long sys_get_mempolicy(int __user *policy,
836 unsigned long __user *nmask,
837 unsigned long maxnode,
838 unsigned long addr, unsigned long flags)
843 if (nmask != NULL && maxnode < MAX_NUMNODES)
846 err = do_get_mempolicy(&pval, &nodes, addr, flags);
851 if (policy && put_user(pval, policy))
855 err = copy_nodes_to_user(nmask, maxnode, &nodes);
862 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
863 compat_ulong_t __user *nmask,
864 compat_ulong_t maxnode,
865 compat_ulong_t addr, compat_ulong_t flags)
868 unsigned long __user *nm = NULL;
869 unsigned long nr_bits, alloc_size;
870 DECLARE_BITMAP(bm, MAX_NUMNODES);
872 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
873 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
876 nm = compat_alloc_user_space(alloc_size);
878 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
881 err = copy_from_user(bm, nm, alloc_size);
882 /* ensure entire bitmap is zeroed */
883 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
884 err |= compat_put_bitmap(nmask, bm, nr_bits);
890 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
891 compat_ulong_t maxnode)
894 unsigned long __user *nm = NULL;
895 unsigned long nr_bits, alloc_size;
896 DECLARE_BITMAP(bm, MAX_NUMNODES);
898 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
899 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
902 err = compat_get_bitmap(bm, nmask, nr_bits);
903 nm = compat_alloc_user_space(alloc_size);
904 err |= copy_to_user(nm, bm, alloc_size);
910 return sys_set_mempolicy(mode, nm, nr_bits+1);
913 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
914 compat_ulong_t mode, compat_ulong_t __user *nmask,
915 compat_ulong_t maxnode, compat_ulong_t flags)
918 unsigned long __user *nm = NULL;
919 unsigned long nr_bits, alloc_size;
922 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
923 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
926 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
927 nm = compat_alloc_user_space(alloc_size);
928 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
934 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
939 /* Return effective policy for a VMA */
940 static struct mempolicy * get_vma_policy(struct task_struct *task,
941 struct vm_area_struct *vma, unsigned long addr)
943 struct mempolicy *pol = task->mempolicy;
946 if (vma->vm_ops && vma->vm_ops->get_policy)
947 pol = vma->vm_ops->get_policy(vma, addr);
948 else if (vma->vm_policy &&
949 vma->vm_policy->policy != MPOL_DEFAULT)
950 pol = vma->vm_policy;
953 pol = &default_policy;
957 /* Return a zonelist representing a mempolicy */
958 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
962 switch (policy->policy) {
964 nd = policy->v.preferred_node;
969 /* Lower zones don't get a policy applied */
970 /* Careful: current->mems_allowed might have moved */
971 if (gfp_zone(gfp) >= policy_zone)
972 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
973 return policy->v.zonelist;
975 case MPOL_INTERLEAVE: /* should not happen */
983 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
986 /* Do dynamic interleaving for a process */
987 static unsigned interleave_nodes(struct mempolicy *policy)
990 struct task_struct *me = current;
993 next = next_node(nid, policy->v.nodes);
994 if (next >= MAX_NUMNODES)
995 next = first_node(policy->v.nodes);
1000 /* Do static interleaving for a VMA with known offset. */
1001 static unsigned offset_il_node(struct mempolicy *pol,
1002 struct vm_area_struct *vma, unsigned long off)
1004 unsigned nnodes = nodes_weight(pol->v.nodes);
1005 unsigned target = (unsigned)off % nnodes;
1011 nid = next_node(nid, pol->v.nodes);
1013 } while (c <= target);
1017 /* Determine a node number for interleave */
1018 static inline unsigned interleave_nid(struct mempolicy *pol,
1019 struct vm_area_struct *vma, unsigned long addr, int shift)
1024 off = vma->vm_pgoff;
1025 off += (addr - vma->vm_start) >> shift;
1026 return offset_il_node(pol, vma, off);
1028 return interleave_nodes(pol);
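/*
 * Worked example of the static interleave above: with pol->v.nodes
 * containing nodes {0, 2, 5}, nnodes == 3.  An offset of 7 gives
 * target == 7 % 3 == 1; walking the mask from -1 takes two next_node()
 * steps (0, then 2) before c exceeds target, so the page goes to node 2.
 * The same offset always maps to the same node, which keeps file-backed
 * mappings stable across repeated faults.
 */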
1031 /* Return a zonelist suitable for a huge page allocation. */
1032 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1034 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1036 if (pol->policy == MPOL_INTERLEAVE) {
1039 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1040 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
1042 return zonelist_policy(GFP_HIGHUSER, pol);
1045 /* Allocate a page in interleaved policy.
1046 Own path because it needs to do special accounting. */
1047 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1050 struct zonelist *zl;
1053 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1054 page = __alloc_pages(gfp, order, zl);
1055 if (page && page_zone(page) == zl->zones[0]) {
1056 zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
1063 * alloc_page_vma - Allocate a page for a VMA.
1066 * %GFP_USER user allocation.
1067 * %GFP_KERNEL kernel allocations,
1068 * %GFP_HIGHMEM highmem/user allocations,
1069 * %GFP_FS allocation should not call back into a file system.
1070 * %GFP_ATOMIC don't sleep.
1072 * @vma: Pointer to VMA or NULL if not available.
1073 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1075 * This function allocates a page from the kernel page pool and applies
1076 * a NUMA policy associated with the VMA or the current process.
1077 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1078 * mm_struct of the VMA to prevent it from going away. Should be used for
1079 * all allocations for pages that will be mapped into
1080 * user space. Returns NULL when no page can be allocated.
1082 * Should be called with the mmap_sem of the vma held.
1085 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1087 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1089 cpuset_update_task_memory_state();
1091 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1094 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1095 return alloc_page_interleave(gfp, 0, nid);
1097 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
1101 * alloc_pages_current - Allocate pages.
1104 * %GFP_USER user allocation,
1105 * %GFP_KERNEL kernel allocation,
1106 * %GFP_HIGHMEM highmem allocation,
1107 * %GFP_FS don't call back into a file system.
1108 * %GFP_ATOMIC don't sleep.
1109 * @order: Power of two of allocation size in pages. 0 is a single page.
1111 * Allocate a page from the kernel page pool and, when not in
1112 * interrupt context, apply the current process' NUMA policy.
1113 * Returns NULL when no page can be allocated.
1115 * Don't call cpuset_update_task_memory_state() unless
1116 * 1) it's ok to take cpuset_sem (can WAIT), and
1117 * 2) allocating for current task (not interrupt).
1119 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1121 struct mempolicy *pol = current->mempolicy;
1123 if ((gfp & __GFP_WAIT) && !in_interrupt())
1124 cpuset_update_task_memory_state();
1125 if (!pol || in_interrupt())
1126 pol = &default_policy;
1127 if (pol->policy == MPOL_INTERLEAVE)
1128 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1129 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1131 EXPORT_SYMBOL(alloc_pages_current);
1133 /* Slow path of a mempolicy copy */
1134 struct mempolicy *__mpol_copy(struct mempolicy *old)
1136 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1139 return ERR_PTR(-ENOMEM);
1141 atomic_set(&new->refcnt, 1);
1142 if (new->policy == MPOL_BIND) {
1143 int sz = ksize(old->v.zonelist);
1144 new->v.zonelist = kmalloc(sz, SLAB_KERNEL);
1145 if (!new->v.zonelist) {
1146 kmem_cache_free(policy_cache, new);
1147 return ERR_PTR(-ENOMEM);
1149 memcpy(new->v.zonelist, old->v.zonelist, sz);
1154 /* Slow path of a mempolicy comparison */
1155 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1159 if (a->policy != b->policy)
1161 switch (a->policy) {
1164 case MPOL_INTERLEAVE:
1165 return nodes_equal(a->v.nodes, b->v.nodes);
1166 case MPOL_PREFERRED:
1167 return a->v.preferred_node == b->v.preferred_node;
1170 for (i = 0; a->v.zonelist->zones[i]; i++)
1171 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1173 return b->v.zonelist->zones[i] == NULL;
1181 /* Slow path of a mpol destructor. */
1182 void __mpol_free(struct mempolicy *p)
1184 if (!atomic_dec_and_test(&p->refcnt))
1186 if (p->policy == MPOL_BIND)
1187 kfree(p->v.zonelist);
1188 p->policy = MPOL_DEFAULT;
1189 kmem_cache_free(policy_cache, p);
1193 * Shared memory backing store policy support.
1195 * Remember policies even when nobody has shared memory mapped.
1196 * The policies are kept in Red-Black tree linked from the inode.
1197 * They are protected by the sp->lock spinlock, which should be held
1198 * for any accesses to the tree.
1201 /* lookup first element intersecting start-end */
1202 /* Caller holds sp->lock */
1203 static struct sp_node *
1204 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1206 struct rb_node *n = sp->root.rb_node;
1209 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1211 if (start >= p->end)
1213 else if (end <= p->start)
1221 struct sp_node *w = NULL;
1222 struct rb_node *prev = rb_prev(n);
1225 w = rb_entry(prev, struct sp_node, nd);
1226 if (w->end <= start)
1230 return rb_entry(n, struct sp_node, nd);
1233 /* Insert a new shared policy into the list. */
1234 /* Caller holds sp->lock */
1235 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1237 struct rb_node **p = &sp->root.rb_node;
1238 struct rb_node *parent = NULL;
1243 nd = rb_entry(parent, struct sp_node, nd);
1244 if (new->start < nd->start)
1246 else if (new->end > nd->end)
1247 p = &(*p)->rb_right;
1251 rb_link_node(&new->nd, parent, p);
1252 rb_insert_color(&new->nd, &sp->root);
1253 PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
1254 new->policy ? new->policy->policy : 0);
1257 /* Find shared policy intersecting idx */
1259 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1261 struct mempolicy *pol = NULL;
1264 if (!sp->root.rb_node)
1266 spin_lock(&sp->lock);
1267 sn = sp_lookup(sp, idx, idx+1);
1269 mpol_get(sn->policy);
1272 spin_unlock(&sp->lock);
1276 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1278 PDprintk("deleting %lx-%lx\n", n->start, n->end);
1279 rb_erase(&n->nd, &sp->root);
1280 mpol_free(n->policy);
1281 kmem_cache_free(sn_cache, n);
1285 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
1287 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1298 /* Replace a policy range. */
1299 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1300 unsigned long end, struct sp_node *new)
1302 struct sp_node *n, *new2 = NULL;
1305 spin_lock(&sp->lock);
1306 n = sp_lookup(sp, start, end);
1307 /* Take care of old policies in the same range. */
1308 while (n && n->start < end) {
1309 struct rb_node *next = rb_next(&n->nd);
1310 if (n->start >= start) {
1316 /* Old policy spanning whole new range. */
1319 spin_unlock(&sp->lock);
1320 new2 = sp_alloc(end, n->end, n->policy);
1326 sp_insert(sp, new2);
1334 n = rb_entry(next, struct sp_node, nd);
1338 spin_unlock(&sp->lock);
1340 mpol_free(new2->policy);
1341 kmem_cache_free(sn_cache, new2);
1346 int mpol_set_shared_policy(struct shared_policy *info,
1347 struct vm_area_struct *vma, struct mempolicy *npol)
1350 struct sp_node *new = NULL;
1351 unsigned long sz = vma_pages(vma);
1353 PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
1355 sz, npol? npol->policy : -1,
1356 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1359 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1363 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1365 kmem_cache_free(sn_cache, new);
1369 /* Free a backing policy store on inode delete. */
1370 void mpol_free_shared_policy(struct shared_policy *p)
1373 struct rb_node *next;
1375 if (!p->root.rb_node)
1377 spin_lock(&p->lock);
1378 next = rb_first(&p->root);
1380 n = rb_entry(next, struct sp_node, nd);
1381 next = rb_next(&n->nd);
1382 rb_erase(&n->nd, &p->root);
1383 mpol_free(n->policy);
1384 kmem_cache_free(sn_cache, n);
1386 spin_unlock(&p->lock);
1389 /* assumes fs == KERNEL_DS */
1390 void __init numa_policy_init(void)
1392 policy_cache = kmem_cache_create("numa_policy",
1393 sizeof(struct mempolicy),
1394 0, SLAB_PANIC, NULL, NULL);
1396 sn_cache = kmem_cache_create("shared_policy_node",
1397 sizeof(struct sp_node),
1398 0, SLAB_PANIC, NULL, NULL);
1400 /* Set interleaving policy for system init. This way not all
1401 the data structures allocated at system boot end up in node zero. */
1403 if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
1404 printk("numa_policy_init: interleaving failed\n");
1407 /* Reset policy of current process to default */
1408 void numa_default_policy(void)
1410 do_set_mempolicy(MPOL_DEFAULT, NULL);
1413 /* Migrate a policy to a different set of nodes */
1414 static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
1415 const nodemask_t *new)
1422 switch (pol->policy) {
1425 case MPOL_INTERLEAVE:
1426 nodes_remap(tmp, pol->v.nodes, *old, *new);
1428 current->il_next = node_remap(current->il_next, *old, *new);
1430 case MPOL_PREFERRED:
1431 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1437 struct zonelist *zonelist;
1440 for (z = pol->v.zonelist->zones; *z; z++)
1441 node_set((*z)->zone_pgdat->node_id, nodes);
1442 nodes_remap(tmp, nodes, *old, *new);
1445 zonelist = bind_zonelist(&nodes);
1447 /* If no mem, then zonelist is NULL and we keep old zonelist.
1448 * If that old zonelist has no remaining mems_allowed nodes,
1449 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1453 /* Good - got mem - substitute new zonelist */
1454 kfree(pol->v.zonelist);
1455 pol->v.zonelist = zonelist;
1466 * Someone moved this task to different nodes. Fixup mempolicies.
1468 * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
1469 * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
1471 void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
1473 rebind_policy(current->mempolicy, old, new);
1477 * Display pages allocated per node and memory policy via /proc.
1480 static const char *policy_types[] = { "default", "prefer", "bind", "interleave" };
1484 * Convert a mempolicy into a string.
1485 * Returns the number of characters in buffer (if positive)
1486 * or an error (negative)
1488 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1493 int mode = pol ? pol->policy : MPOL_DEFAULT;
1500 case MPOL_PREFERRED:
1502 node_set(pol->v.preferred_node, nodes);
1506 get_zonemask(pol, &nodes);
1509 case MPOL_INTERLEAVE:
1510 nodes = pol->v.nodes;
1518 l = strlen(policy_types[mode]);
1519 if (buffer + maxlen < p + l + 1)
1522 strcpy(p, policy_types[mode]);
1525 if (!nodes_empty(nodes)) {
1526 if (buffer + maxlen < p + 2)
1529 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1535 unsigned long pages;
1537 unsigned long mapped;
1538 unsigned long mapcount_max;
1539 unsigned long node[MAX_NUMNODES];
1542 static void gather_stats(struct page *page, void *private)
1544 struct numa_maps *md = private;
1545 int count = page_mapcount(page);
1550 if (count > md->mapcount_max)
1551 md->mapcount_max = count;
1558 md->node[page_to_nid(page)]++;
1562 int show_numa_map(struct seq_file *m, void *v)
1564 struct task_struct *task = m->private;
1565 struct vm_area_struct *vma = v;
1566 struct numa_maps *md;
1573 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1577 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1578 &node_online_map, MPOL_MF_STATS, md);
1581 mpol_to_str(buffer, sizeof(buffer),
1582 get_vma_policy(task, vma, vma->vm_start));
1584 seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
1585 vma->vm_start, buffer, md->pages,
1586 md->mapped, md->mapcount_max);
1589 seq_printf(m, " anon=%lu", md->anon);
1591 for_each_online_node(n)
1593 seq_printf(m, " N%d=%lu", n, md->node[n]);
1599 if (m->count < m->size)
1600 m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
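/*
 * Roughly what one line emitted above looks like in /proc/<pid>/numa_maps
 * (illustrative; the field values are invented):
 *
 *	2aaaaac00000 interleave=0-3 pages=512 mapped=512 maxref=1 N0=128 N1=128 N2=128 N3=128
 *
 * The leading address is vma->vm_start, the policy string comes from
 * mpol_to_str(), and the N<n>= counters are filled in by gather_stats().
 */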