/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

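/*
 * Worked example (assuming SHMLBA = 0x4000, i.e. 16KB with 4KB
 * pages, so PAGE_SHIFT = 12):
 *
 *      COLOUR_ALIGN(0x8001000, 3)
 *        = ((0x8001000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *        = 0x8004000 + 0x3000
 *        = 0x8007000
 *
 * The address is rounded up to an SHMLBA boundary and then offset
 * so that its cache colour matches that of page offset 'pgoff'.
 */
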
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at the same offset within an
 * SHMLBA-sized window, so that all of its mappings share one cache
 * colour.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
#ifdef CONFIG_CPU_V6
        unsigned int cache_type;
        int do_align = 0, aliasing = 0;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.  This is indicated by bits 11 and 23 of the
         * cache type register.
         */
        cache_type = read_cpuid(CPUID_CACHETYPE);
        if (cache_type != read_cpuid(CPUID_ID)) {
                aliasing = (cache_type | cache_type >> 12) & (1 << 11);
                if (aliasing)
                        do_align = filp || flags & MAP_SHARED;
        }
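        /*
         * The mask above relies on the ARMv6 cache type register
         * layout (per the ARM1136 TRM; stated here as an
         * assumption): Isize occupies CTR[11:0] and Dsize
         * CTR[23:12], with the P (aliasing) bit at bit 11 of each
         * field.  "cache_type >> 12" lines the D-side P bit up
         * under the I-side one, so a single 1 << 11 test covers
         * both caches.
         */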
#else
#define do_align 0
#define aliasing 0
#endif

        /*
         * We enforce the MAP_FIXED case: the requested address is
         * used as-is, but on an aliasing cache a shared mapping
         * must already be SHMLBA aligned.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
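
        /*
         * For example (hypothetical numbers), on an aliasing cache
         * the check above rejects a MAP_FIXED|MAP_SHARED request at
         * 0x8001000 when SHMLBA is 0x4000, since
         * 0x8001000 & 0x3fff == 0x1000.
         */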

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
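        /*
         * mm->free_area_cache remembers where the previous search
         * ended and mm->cached_hole_size tracks the largest hole
         * seen below that point, so most searches avoid rescanning
         * from TASK_UNMAPPED_BASE.
         */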
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
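
/*
 * Illustration only (userspace, not part of this file's build): on
 * an aliasing VIPT system, an address handed out for a MAP_SHARED
 * file mapping by the routine above should come back SHMLBA
 * aligned.  The 0x3fff mask assumes SHMLBA is 16KB (true for ARM
 * with 4KB pages, but an assumption here).
 *
 *      #include <stdio.h>
 *      #include <sys/mman.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);
 *              void *p;
 *
 *              if (fd < 0 || ftruncate(fd, 4096) < 0)
 *                      return 1;
 *              p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *              if (p == MAP_FAILED)
 *                      return 1;
 *              printf("addr %p colour aligned: %s\n", p,
 *                     ((unsigned long)p & 0x3fff) ? "no" : "yes");
 *              return 0;
 *      }
 */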

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
        if (addr + size > __pa(high_memory))
                return 0;

        return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
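/*
 * With 4KB pages, 0x00100000 page frames is exactly 4GB.  For
 * example (hypothetical numbers), pfn 0x000ff000 with size
 * 0x2000000 (0x2000 frames) gives 0x000ff000 + 0x2000 = 0x101000,
 * which exceeds 0x00100000, so such a request is rejected.
 */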
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}