/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
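/*
 * Editor's illustrative sketch (not part of the original file): how the
 * low-level helpers above are meant to be combined, mirroring what vmap()
 * further down does.  The example_* names are hypothetical; remove_vm_area()
 * is defined later in this file.
 */
#if 0
static void *example_map_two_pages(struct page *p0, struct page *p1)
{
	struct page *pages[2] = { p0, p1 };
	struct page **tmp = pages;	/* map_vm_area() advances this cursor */
	struct vm_struct *area;

	/* Reserve two pages of kernel virtual space (plus the guard page). */
	area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* Wire the pages into the kernel page tables. */
	if (map_vm_area(area, PAGE_KERNEL, &tmp)) {
		/* remove_vm_area() also unmaps the range via unmap_vm_area(). */
		kfree(remove_vm_area(area->addr));
		return NULL;
	}
	return area->addr;
}
#endif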
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}
/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}
/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
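/*
 * Editor's illustrative sketch (not part of the original file): reserving
 * and releasing kernel virtual address space without backing pages, roughly
 * the skeleton that architecture ioremap()/iounmap() implementations build
 * on around their page-table setup.  The example_* names are hypothetical.
 */
#if 0
static void *example_reserve(unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

	if (!area)
		return NULL;
	/* ...arch code would now map a physical range at area->addr... */
	return area->addr;
}

static void example_release(void *addr)
{
	/* Unmaps the range (via unmap_vm_area()) and unlinks the descriptor. */
	kfree(remove_vm_area(addr));
}
#endif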
void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
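/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * vmap()/vunmap() round trip over pages the caller already owns.  Note that
 * vunmap() drops only the mapping; the caller still frees the pages.  The
 * example_* names are hypothetical.
 */
#if 0
static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* Make @count discontiguous pages appear virtually contiguous. */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void example_unmap_pages(void *buf, struct page **pages,
				unsigned int count)
{
	unsigned int i;

	vunmap(buf);			/* tear down the virtual mapping */
	for (i = 0; i < count; i++)
		__free_page(pages[i]);	/* pages are still ours to release */
}
#endif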
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_LEVEL_MASK),
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
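/*
 * Editor's illustrative sketch (not part of the original file): the common
 * vmalloc()/vfree() pattern for a large, only virtually contiguous buffer.
 * The example_* names and the size are hypothetical.
 */
#if 0
static void *example_table;

static int example_init(void)
{
	example_table = vmalloc(1 << 20);	/* 1 MiB, may sleep */
	if (!example_table)
		return -ENOMEM;
	memset(example_table, 0, 1 << 20);
	return 0;
}

static void example_exit(void)
{
	vfree(example_table);	/* NULL-safe, not from interrupt context */
	example_table = NULL;
}
#endif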
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
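/*
 * Editor's illustrative sketch (not part of the original file): placing a
 * per-node buffer on each online NUMA node with vmalloc_node().  The
 * example_* names are hypothetical.
 */
#if 0
static void *example_node_buf[MAX_NUMNODES];

static int example_alloc_per_node(unsigned long size)
{
	int node;

	for_each_online_node(node) {
		example_node_buf[node] = vmalloc_node(size, node);
		if (!example_node_buf[node])
			return -ENOMEM;	/* caller would unwind with vfree() */
	}
	return 0;
}
#endif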
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif
/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
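/*
 * Editor's illustrative sketch (not part of the original file): copying a
 * snapshot out of a vmalloc area with vread(), in the style of a
 * /dev/kmem-type reader.  Gaps between areas come back as zero bytes.  The
 * example_* names are hypothetical.
 */
#if 0
static long example_read_vmalloc(char __user *ubuf, char *kaddr,
				 unsigned long len)
{
	char *tmp = kmalloc(len, GFP_KERNEL);
	long copied;

	if (!tmp)
		return -ENOMEM;
	copied = vread(tmp, kaddr, len);	/* returns bytes placed in tmp */
	if (copied > 0 && copy_to_user(ubuf, tmp, copied))
		copied = -EFAULT;
	kfree(tmp);
	return copied;
}
#endif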
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
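/*
 * Editor's illustrative sketch (not part of the original file): the intended
 * pairing of vmalloc_user() with remap_vmalloc_range() in a hypothetical
 * driver's mmap handler.  The example_* names are hypothetical, and the
 * buffer is assumed to have been allocated with vmalloc_user() so that the
 * VM_USERMAP check above passes.
 */
#if 0
static void *example_shared_buf;	/* allocated with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Only VM_USERMAP areas (e.g. from vmalloc_user()) may be remapped. */
	return remap_vmalloc_range(vma, example_shared_buf, vma->vm_pgoff);
}
#endif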
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}