[PATCH] core remove PageReserved
arch/sparc64/mm/generic.c
/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check
 * the mem_map table, as this is independent of normal memory.
 */
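/* mk_pte_io() (asm/pgtable.h) builds the PTE directly from a bus
 * offset, a pgprot and an IO space number; roughly, the space bits
 * and the offset are folded into the PTE's physical address field,
 * so no struct page is ever involved.
 */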
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space);
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val(prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val(prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val(prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

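		/* Write the chosen PTE into every base-page slot it spans.
		 * On sparc64 the page size is encoded in the PTE itself
		 * (_PAGE_SZ4MB and friends), so a large mapping is simply
		 * the same huge PTE replicated across each slot it covers.
		 */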
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
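/* Illustrative walk-through with assumed values, not from the original:
 * if address == 0x400000, offset == 0x800000 and end leaves at least
 * 4MB of room, then (address & 0x3fffff) and (offset & 0x3ffffe) are
 * both zero, so a single _PAGE_SZ4MB entry covers the next 4MB in one
 * step.  Break either alignment and the code falls back to the largest
 * of 512K, 64K or base pages that still fits.
 */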

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
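	/* offset was biased by -address above, so each chunk below can
	 * recover its own IO offset as (address + offset); the pud level
	 * plays the same trick.
	 */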
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;
	int error;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		error = io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
		if (error)
			return error;
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
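	/* On sparc the pfn argument is a cookie: GET_IOSPACE() extracts
	 * the IO space number from its top bits and GET_PFN() the real
	 * page frame (see asm/pgtable.h); callers pack the two together
	 * with MK_IOSPACE_PFN().
	 */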
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

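	/* With the per-page PageReserved bit gone from the core VM (the
	 * point of this patch series), these vma flags carry the same
	 * information instead: VM_IO keeps the range away from core
	 * dumps and get_user_pages(), and VM_RESERVED (roughly) tells
	 * the VM that no normal pages back this mapping, so it must not
	 * touch or swap out these ptes.
	 */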
	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(vma, beg, end);
	spin_unlock(&mm->page_table_lock);

	return error;
}
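
/* Illustrative usage sketch, not part of this file: a sparc driver's
 * mmap() method might call io_remap_pfn_range() roughly as below,
 * assuming MK_IOSPACE_PFN() from asm/pgtable.h.  struct mydev,
 * mydev_mmap(), regs_phys and iospace are hypothetical names invented
 * for the example; it sits under #if 0 so it stays out of the build.
 */
#if 0
struct mydev {
	unsigned long	regs_phys;	/* bus address of the register block */
	int		iospace;	/* obio space number for this bus */
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	/* Pack the IO space number and the physical page frame into the
	 * pfn cookie io_remap_pfn_range() expects.
	 */
	unsigned long pfn = MK_IOSPACE_PFN(dev->iospace,
					   dev->regs_phys >> PAGE_SHIFT);

	return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
				  vma->vm_page_prot);
}
#endif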