/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
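
/*
 * COLOUR_ALIGN() rounds "addr" up to the next SHMLBA boundary and then
 * adds the offset that page "pgoff" of the object occupies within an
 * SHMLBA-sized window, so the result has the same cache colour as the
 * file offset.  Worked example, assuming 4KiB pages and
 * SHMLBA == 4 * PAGE_SIZE (16KiB): COLOUR_ALIGN(0x8000, 5) keeps 0x8000
 * (already 16KiB aligned) and adds (5 << 12) & 0x3fff == 0x1000,
 * returning 0x9000.
 */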

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
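
/*
 * A VIPT cache indexes by virtual address, so two virtual mappings of
 * the same physical page can land in different cache lines unless they
 * share the same colour, i.e. the same offset within an SHMLBA-sized
 * window.  A VIVT cache is instead kept coherent by explicit cache
 * maintenance, so it needs no user-visible alignment constraint.
 */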
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif
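
	/*
	 * do_align is only set on an aliasing cache, and only for
	 * mappings that may be visible at more than one user address:
	 * anything file-backed, plus anonymous MAP_SHARED memory.
	 * Private anonymous mappings keep the same virtual address
	 * everywhere they are visible (e.g. across fork()), so they
	 * need no extra alignment.
	 */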

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
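
	/*
	 * Pick a starting point for the linear search below.
	 * cached_hole_size is the largest gap already skipped over below
	 * free_area_cache; if the request cannot fit in that, nothing
	 * below the cached position will do and we resume from
	 * free_area_cache, otherwise we rescan from TASK_UNMAPPED_BASE.
	 */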
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
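
/*
 * Net effect for user space on a CPU whose caches alias, assuming 4KiB
 * pages and SHMLBA == 4 * PAGE_SIZE: two processes that both do
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0x5000);
 *
 * receive addresses congruent to 0x1000 modulo 16KiB, so the shared
 * pages have the same colour in both address spaces and cannot alias
 * in the VIPT cache.
 */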