/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check the
 * mem_map table, as IO space is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space);
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
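
/* Worked example of the size selection above (editorial note, assuming
 * the sparc64 8K base page size): if address and offset are both 4MB
 * aligned and at least 4MB of the range remains, the first test fires,
 * curend is pushed 0x400000 ahead, and the inner loop stamps the
 * _PAGE_SZ4MB entry into all 0x400000 / 0x2000 = 512 base PTEs beneath
 * it.  With only 64K alignment the third test fires instead and each
 * entry covers 0x10000 / 0x2000 = 8 PTEs.
 */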

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
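
/* Editorial note on the offset bookkeeping used at each level: after
 * offset -= address, the expression address + offset passed downward
 * always equals the caller's original offset plus however far address
 * has advanced, so each callee sees the IO offset that corresponds to
 * its own starting address even though the loop rounds address up to
 * the next table boundary.
 */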

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(vma, beg, end);
	spin_unlock(&mm->page_table_lock);

	return error;
}
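
/* Usage sketch (hypothetical driver, not part of the original file):
 * a driver's mmap handler can hand the whole job to io_remap_pfn_range().
 * The pfn argument packs both the IO space and the page frame, which the
 * routine above unpacks with GET_IOSPACE() and GET_PFN().
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = mydrv_base_pfn;	(assumed device PFN)
 *
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 *
 * Passing vma->vm_page_prot is harmless here since the routine replaces
 * prot with pg_iobits before mapping.
 */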