/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

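/*
 * pgd slots below FIRST_KERNEL_PGD_NR map user space and are private to
 * each mm; slots from FIRST_KERNEL_PGD_NR upwards cover the kernel and
 * I/O mappings, which every pgd shares by copying them from the init pgd.
 */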
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need a 16KB (order-2) allocation for the level-1 page table
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        /* zero the user portion of the new pgd */
        memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

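        /*
         * Clean the new table out of the D-cache: on CPUs whose hardware
         * table walker does not snoop the D-cache, the walker would
         * otherwise not see the entries written above.
         */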
        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        if (!vectors_high()) {
                /*
                 * On ARM, the first page must always be allocated since
                 * it contains the machine vectors when they are not
                 * relocated to the high vectors page.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
no_pmd:
        free_pages((unsigned long)new_pgd, 2);
no_pgd:
        return NULL;
}

void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
        pmd_t *pmd;
        pgtable_t pte;

        if (!pgd)
                return;

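        /*
         * By the time the pgd is freed, the user mappings have already
         * been torn down (exit_mmap() releases the user page tables), so
         * the only thing that can remain is the pmd/pte that
         * get_pgd_slow() may have set up for the vectors page at address 0.
         */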
        /* pgd is always present and good */
        pmd = pmd_off(pgd, 0);
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free(mm, pte);
        pmd_free(mm, pmd);
free:
        free_pages((unsigned long) pgd, 2);
}
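
/*
 * Note: callers do not normally use these functions directly.  In ARM
 * kernels of this era, <asm/pgalloc.h> wraps them behind the generic
 * page-table allocation hooks, roughly as sketched below (the exact
 * location and spelling may differ between releases):
 *
 *	#define pgd_alloc(mm)		get_pgd_slow(mm)
 *	#define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)
 *
 * so a new pgd is allocated via pgd_alloc() when an mm_struct is set up
 * (e.g. on fork/exec) and released via pgd_free() when the mm is finally
 * dropped.
 */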