/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
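/*
 * Editor's illustration, not part of the original source: assuming the
 * usual SH-4A definitions (PMB_ADDR 0xf6100000, PMB_DATA 0xf7100000,
 * PMB_E_SHIFT 8, PMB_E_MASK 0xf), entry 2 resolves to:
 *
 *	mk_pmb_addr(2) == 0xf6100000 | (2 << 8) == 0xf6100200
 *	mk_pmb_data(2) == 0xf7100000 | (2 << 8) == 0xf7100200
 *
 * i.e. each of the 16 entries owns an address/data register pair spaced
 * 0x100 apart in the memory-mapped PMB array.
 */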
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	kmem_cache_free(pmb_cache, pmbe);
}
/*
 * Must be run from P2 (uncached) for __set_pmb_entry().
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_SH_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}
int set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_P2();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_P1();

	return ret;
}
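/*
 * Sketch of a typical caller (editor's example, not from the original
 * file; the addresses are hypothetical): allocate a software entry,
 * then program it into a free hardware slot.
 *
 *	struct pmb_entry *pmbe;
 *
 *	pmbe = pmb_alloc(0x9c000000, 0x1c000000, PMB_SZ_64M | PMB_C);
 *	if (!IS_ERR(pmbe) && set_pmb_entry(pmbe) != 0)
 *		pmb_free(pmbe);
 *
 * Most users should go through pmb_remap() below, which also handles
 * size splitting and entry linking.
 */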
void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to reset
	 * by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_P2();

	/* Clear V-bit in both the address and data arrays */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_P1();

	clear_bit(entry, &pmb_map);
}
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	/* Append to the tail of the singly-linked list */
	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	/* Loop again while at least the smallest entry (16MB) still fits */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
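/*
 * Editor's example of the greedy split above (not in the original
 * file): an 80MB (0x05000000) cacheable request walks pmb_sizes[] and
 * is satisfied with one 64MB entry plus one 16MB entry, linked so that
 * a single pmb_unmap() on the base address tears down both:
 *
 *	long mapped = pmb_remap(vaddr, phys, 0x05000000, _PAGE_CACHABLE);
 *	if (mapped > 0)
 *		pmb_unmap(vaddr);
 *
 * The return value is the number of bytes actually mapped (here the
 * full 0x05000000); any remainder smaller than 16MB is not mapped.
 */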
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	/* Walk the link chain and free every entry in the mapping */
	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	spin_lock_irq(&pmb_list_lock);

	pmbe->entry = PMB_NO_ENTRY;
	pmb_list_add(pmbe);

	spin_unlock_irq(&pmb_list_lock);
}

static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmb);
	spin_unlock_irq(&pmb_list_lock);
}
static int __init pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor,
				      pmb_cache_dtor);

	jump_to_P2();

	/*
	 * Ordering is important: P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	back_to_P1();

	return 0;
}
arch_initcall(pmb_init);
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     NULL, NULL, &pmb_debugfs_fops);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);