linux-2.6: arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
                x += phys_base;
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
                VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
                                        !phys_addr_valid(x));
        }
        return x;
}
EXPORT_SYMBOL(__phys_addr);
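
/*
 * On 64-bit, __pa() is defined in terms of __phys_addr(), so callers
 * normally go through the macro; a usage sketch (not part of this file):
 *
 *      phys_addr_t p = __pa(some_directly_mapped_ptr);
 *
 * Any direct-map or kernel-image address is fine here; vmalloc addresses
 * trip the VIRTUAL_BUG_ON()s above under CONFIG_DEBUG_VIRTUAL.
 */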

bool __virt_addr_valid(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                if (x >= KERNEL_IMAGE_SIZE)
                        return false;
                x += phys_base;
        } else {
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
                if (system_state == SYSTEM_BOOTING ?
                                x > MAXMEM : !phys_addr_valid(x)) {
                        return false;
                }
        }

        return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
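
/*
 * Sketch of the expected use; virt_addr_valid() is the wrapper macro
 * around this helper (assumption based on its companion definition):
 *
 *      if (virt_addr_valid(ptr))
 *              page = virt_to_page(ptr);
 *
 * Unlike a bare __pa(), this also rejects addresses whose pfn has no
 * struct page behind it (holes in the direct mapping).
 */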

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
        /* VMALLOC_* aren't constants; not available at boot time */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
        VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
                is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
        if (x < PAGE_OFFSET)
                return false;
        if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
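
/*
 * Illustrative sketch (not from this file): a caller scanning a pfn
 * range and counting only pages that e820 reports as usable RAM.
 * "start_pfn", "end_pfn" and "count" are hypothetical locals:
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *              if (page_is_ram(pfn))
 *                      count++;
 */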

int pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_ram_page = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                if (page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_ram_page = 1;

                if (ram_page == not_ram_page)
                        return -1;
        }

        return ram_page;
}
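
/*
 * The return value is tri-state: 1 if every page in [start, end) is RAM,
 * 0 if none is, and -1 if the range mixes RAM and non-RAM pages. A caller
 * such as the PAT code can thus refuse mixed ranges outright; a sketch:
 *
 *      ret = pagerange_is_ram(paddr, paddr + size);
 *      if (ret < 0)
 *              return -EINVAL;         (hypothetical error path)
 */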

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
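
/*
 * Example (mirrors the call __ioremap_caller makes below): after mapping
 * pages write-combined, the kernel's direct mapping of the same physical
 * range must be given the same attribute to avoid aliasing:
 *
 *      err = ioremap_change_attr(vaddr, size, _PAGE_CACHE_WC);
 */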

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
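
/*
 * Typical driver usage, as a sketch only; "bar" stands for a BAR address
 * obtained from pci_resource_start() and CTRL_REG is a made-up register
 * offset:
 *
 *      void __iomem *regs = ioremap_nocache(bar, 0x1000);
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + CTRL_REG);
 *      ...
 *      iounmap(regs);
 */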

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:     bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
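
/*
 * Sketch of the intended use: framebuffer-style memory, where combining
 * streaming writes helps (the fbdev field names are just an example):
 *
 *      info->screen_base = ioremap_wc(info->fix.smem_start,
 *                                     info->fix.smem_len);
 *
 * When PAT is disabled this degrades to UC- via ioremap_nocache(), so
 * callers need no fallback of their own.
 */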

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void __iomem *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = __ioremap_caller(phys_addr, size, flags,
                               __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
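
/*
 * Only the cache-attribute bits of prot_val are honoured; everything else
 * comes from the fixed PAGE_KERNEL_IO* protections chosen above. As a
 * sketch, a caller can pass a vma's protection straight through:
 *
 *      maddr = ioremap_prot(phys, PAGE_SIZE, pgprot_val(vma->vm_page_prot));
 */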

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space.   So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
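
/*
 * Note the symmetry with ioremap: the sub-page offset that ioremap added
 * is masked off again here, so the pointer ioremap returned must be
 * passed back unchanged, exactly once:
 *
 *      void __iomem *p = ioremap_nocache(phys, len);
 *      ...
 *      iounmap(p);
 */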

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
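
/*
 * These two helpers bracket /dev/mem accesses; read_mem() in
 * drivers/char/mem.c uses them roughly like this (error handling
 * elided):
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (!ptr)
 *              return -EFAULT;
 *      copy_to_user(buf, ptr, sz);
 *      unxlate_dev_mem_ptr(p, ptr);
 */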

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "Please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot\n",
                         phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
        return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
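
/*
 * Boot-time usage sketch: code running before paging_init() can peek at
 * firmware tables through these slots ("tbl_phys" and "tbl" are made-up
 * names):
 *
 *      tbl = early_memremap(tbl_phys, sizeof(*tbl));
 *      ...
 *      early_iounmap(tbl, sizeof(*tbl));
 *
 * Every mapping must be undone with early_iounmap(), or
 * check_early_ioremap_leak() above will warn at late_initcall time.
 */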

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size mismatch, expected %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}