/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/*
 * Unmap the kernel page-table entries covering @area and flush the TLB
 * for the range.  @area->size still includes the guard page here.
 */
void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Map the pages in @pages into @area at protection @prot.  The guard
 * page at the end of the area is deliberately left unmapped.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
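
/*
 * Illustrative sketch (not part of the original file): how an
 * ioremap()-style caller might use get_vm_area().  The function name
 * example_reserve() is hypothetical; a real caller would go on to map
 * physical pages into the returned range before using it.
 */
static void *example_reserve(unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;		/* vmalloc space exhausted */
	/* area->size now includes the extra guard page. */
	return area->addr;
}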

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;
	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
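
/*
 * Illustrative sketch: the teardown side of a get_vm_area() reservation,
 * mirroring what iounmap() implementations do.  remove_vm_area() unmaps
 * the range and unlinks the vm_struct; freeing it is the caller's job.
 * example_release() is a hypothetical name.
 */
static void example_release(void *addr)
{
	struct vm_struct *area = remove_vm_area(addr);

	if (!area) {
		WARN_ON(1);		/* not a reserved area */
		return;
	}
	kfree(area);
}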

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		/* The pages array itself may have been vmalloc'ed. */
		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
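
/*
 * Illustrative sketch: because vfree(NULL) is a no-op, error paths can
 * free unconditionally.  example_teardown() and its argument are
 * hypothetical.
 */
static void example_teardown(char *buf)
{
	vfree(buf);		/* safe even when buf is NULL */
}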

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
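
/*
 * Illustrative sketch: gluing an array of individually allocated pages
 * into one virtually contiguous mapping, then dropping the mapping with
 * vunmap() while keeping the pages.  The flag choice and the name
 * example_vmap_demo() are hypothetical.
 */
static void example_vmap_demo(struct page **pages, unsigned int count)
{
	void *addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!addr)
		return;
	memset(addr, 0, count << PAGE_SHIFT);	/* touch all pages contiguously */
	vunmap(addr);		/* unmaps only; the pages themselves survive */
}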

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}
EXPORT_SYMBOL(__vmalloc);
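
/*
 * Illustrative sketch: __vmalloc() exposes both the gfp flags and the
 * pagetable protection.  Here GFP_NOFS avoids recursing into filesystem
 * reclaim, a pattern used by filesystems; example_fs_alloc() is a
 * hypothetical name.
 */
static void *example_fs_alloc(unsigned long size)
{
	return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
}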

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
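
/*
 * Illustrative sketch: the common vmalloc()/vfree() pairing for a buffer
 * that must be virtually but not physically contiguous.  Names and sizes
 * are hypothetical.
 */
static int example_big_buffer(void)
{
	char *buf = vmalloc(64 * 1024);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, 64 * 1024);
	/* ... use buf ... */
	vfree(buf);
	return 0;
}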

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
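
/*
 * Illustrative sketch: a loader-style user (the module loader is the
 * classic example) allocating memory that must later be executed.
 * example_alloc_code() is a hypothetical name.
 */
static void *example_alloc_code(unsigned long image_size)
{
	return vmalloc_exec(image_size);
}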

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32-bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
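
/*
 * Illustrative sketch: vmalloc_32() for devices that can only reach
 * 32-bit physical addresses.  The pages are not physically contiguous,
 * so this suits hardware that maps each page separately.
 * example_alloc_dev_buf() is a hypothetical name.
 */
static void *example_alloc_dev_buf(unsigned long size)
{
	return vmalloc_32(size);
}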

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* Zero-fill the gap before this area. */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* Skip the gap before this area. */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
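
/*
 * Illustrative sketch: vread() is what /proc/kcore-style readers use to
 * copy out of the vmalloc region, with unmapped gaps zero-filled.
 * example_peek() and its buffer handling are hypothetical.
 */
static long example_peek(char *vmalloc_addr, unsigned long len)
{
	char *kbuf = kmalloc(len, GFP_KERNEL);
	long copied;

	if (!kbuf)
		return -ENOMEM;
	copied = vread(kbuf, vmalloc_addr, len);
	/* ... inspect kbuf ... */
	kfree(kbuf);
	return copied;
}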