arch/sh/kernel/io_trapped.c
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

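/*
 * Overview: registered resource ranges are remapped into vmalloc space
 * with PAGE_NONE protection, so any load or store through the remapped
 * window faults.  The fault path calls handle_trapped_io(), which decodes
 * the faulting instruction and replays the access against the real device
 * address, honouring the device's minimum bus width.
 */
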
#define TRAPPED_PAGES_MAX 16

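/*
 * Registered trapped io regions: one list for port I/O (IORESOURCE_IO)
 * and one for memory-mapped I/O (IORESOURCE_MEM).  Both lists are
 * protected by trapped_lock.
 */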
#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

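/* "noiotrap" on the kernel command line disables trapped io entirely */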
static int trapped_io_disable __read_mostly;

static int __init trapped_io_setup(char *__unused)
{
        trapped_io_disable = 1;
        return 1;
}
__setup("noiotrap", trapped_io_setup);

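/*
 * Build a PAGE_NONE vmap() window large enough to cover all resources of
 * a page-aligned struct trapped_io, then queue the tiop on the matching
 * list.  Every page of the window is backed by the page holding the tiop
 * itself, which is how lookup_tiop() recovers the descriptor from the
 * page tables when an access to the window faults.
 * Returns 0 on success, -1 if the filter could not be installed.
 */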
int register_trapped_io(struct trapped_io *tiop)
{
        struct resource *res;
        unsigned long len = 0, flags = 0;
        struct page *pages[TRAPPED_PAGES_MAX];
        int k, n;

        if (unlikely(trapped_io_disable))
                return 0;

        /* structure must be page aligned */
        if ((unsigned long)tiop & (PAGE_SIZE - 1))
                goto bad;

        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                len += roundup((res->end - res->start) + 1, PAGE_SIZE);
                flags |= res->flags;
        }

        /* support IORESOURCE_IO _or_ MEM, not both */
        if (hweight_long(flags) != 1)
                goto bad;

        n = len >> PAGE_SHIFT;

        if (n >= TRAPPED_PAGES_MAX)
                goto bad;

        for (k = 0; k < n; k++)
                pages[k] = virt_to_page(tiop);

        tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
        if (!tiop->virt_base)
                goto bad;

        len = 0;
        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
                       (unsigned long)(tiop->virt_base + len),
                       res->flags & IORESOURCE_IO ? "io" : "mmio",
                       (unsigned long)res->start);
                len += roundup((res->end - res->start) + 1, PAGE_SIZE);
        }

        tiop->magic = IO_TRAPPED_MAGIC;
        INIT_LIST_HEAD(&tiop->list);
        spin_lock_irq(&trapped_lock);
        if (flags & IORESOURCE_IO)
                list_add(&tiop->list, &trapped_io);
        if (flags & IORESOURCE_MEM)
                list_add(&tiop->list, &trapped_mem);
        spin_unlock_irq(&trapped_lock);

        return 0;
 bad:
        pr_warning("unable to install trapped io filter\n");
        return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

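/*
 * Walk a trapped io list (trapped_io or trapped_mem) and return the
 * virtual address inside the trapped window that corresponds to the
 * resource starting at @offset, or NULL if no registered region matches.
 */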
void __iomem *match_trapped_io_handler(struct list_head *list,
                                       unsigned long offset,
                                       unsigned long size)
{
        unsigned long voffs;
        struct trapped_io *tiop;
        struct resource *res;
        int k, len;

        spin_lock_irq(&trapped_lock);
        list_for_each_entry(tiop, list, list) {
                voffs = 0;
                for (k = 0; k < tiop->num_resources; k++) {
                        res = tiop->resource + k;
                        if (res->start == offset) {
                                spin_unlock_irq(&trapped_lock);
                                return tiop->virt_base + voffs;
                        }

                        len = (res->end - res->start) + 1;
                        voffs += roundup(len, PAGE_SIZE);
                }
        }
        spin_unlock_irq(&trapped_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

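/*
 * Recover the struct trapped_io backing a trapped virtual address by
 * walking the kernel page tables; every pte of a trapped window points
 * at the page holding its tiop (see register_trapped_io() above).
 * Returns NULL if the address is not mapped at all.
 */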
static struct trapped_io *lookup_tiop(unsigned long address)
{
        pgd_t *pgd_k;
        pud_t *pud_k;
        pmd_t *pmd_k;
        pte_t *pte_k;
        pte_t entry;

        pgd_k = swapper_pg_dir + pgd_index(address);
        if (!pgd_present(*pgd_k))
                return NULL;

        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        pte_k = pte_offset_kernel(pmd_k, address);
        entry = *pte_k;

        return pfn_to_kaddr(pte_pfn(entry));
}

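/*
 * Translate a virtual address inside a trapped window back to the
 * physical address of the underlying resource.  Returns 0 if the
 * address falls outside all of the tiop's resources.
 */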
static unsigned long lookup_address(struct trapped_io *tiop,
                                    unsigned long address)
{
        struct resource *res;
        unsigned long vaddr = (unsigned long)tiop->virt_base;
        unsigned long len;
        int k;

        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                len = roundup((res->end - res->start) + 1, PAGE_SIZE);
                if (address < (vaddr + len))
                        return res->start + (address - vaddr);
                vaddr += len;
        }
        return 0;
}

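/*
 * Read src_len bytes from src_addr and write dst_len bytes to dst_addr,
 * using a single access of the given width on each side.  This is what
 * lets a narrow CPU access be widened to the device's minimum bus width
 * (or vice versa) when the trapped access is replayed.
 */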
static unsigned long long copy_word(unsigned long src_addr, int src_len,
                                    unsigned long dst_addr, int dst_len)
{
        unsigned long long tmp = 0;

        switch (src_len) {
        case 1:
                tmp = ctrl_inb(src_addr);
                break;
        case 2:
                tmp = ctrl_inw(src_addr);
                break;
        case 4:
                tmp = ctrl_inl(src_addr);
                break;
        case 8:
                tmp = ctrl_inq(src_addr);
                break;
        }

        switch (dst_len) {
        case 1:
                ctrl_outb(tmp, dst_addr);
                break;
        case 2:
                ctrl_outw(tmp, dst_addr);
                break;
        case 4:
                ctrl_outl(tmp, dst_addr);
                break;
        case 8:
                ctrl_outq(tmp, dst_addr);
                break;
        }

        return tmp;
}

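/*
 * Read callback for the replayed access: @src is the trapped virtual
 * address of the device register, @dst points at the CPU-side
 * destination and @cnt is the access size in bytes.  Returns 0 on
 * success, @cnt if the address could not be translated to a resource.
 */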
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
        struct trapped_io *tiop;
        unsigned long src_addr = (unsigned long)src;
        unsigned long long tmp;

        pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
        tiop = lookup_tiop(src_addr);
        WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

        src_addr = lookup_address(tiop, src_addr);
        if (!src_addr)
                return cnt;

        tmp = copy_word(src_addr,
                        max_t(unsigned long, cnt,
                              (tiop->minimum_bus_width / 8)),
                        (unsigned long)dst, cnt);

        pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
        return 0;
}

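/*
 * Write callback for the replayed access: @dst is the trapped virtual
 * address of the device register, @src points at the CPU-side source
 * and @cnt is the access size in bytes.  Returns 0 on success, @cnt if
 * the address could not be translated to a resource.
 */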
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
        struct trapped_io *tiop;
        unsigned long dst_addr = (unsigned long)dst;
        unsigned long long tmp;

        pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
        tiop = lookup_tiop(dst_addr);
        WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

        dst_addr = lookup_address(tiop, dst_addr);
        if (!dst_addr)
                return cnt;

        tmp = copy_word((unsigned long)src, cnt,
                        dst_addr, max_t(unsigned long, cnt,
                                        (tiop->minimum_bus_width / 8)));

        pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
        return 0;
}

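/*
 * Handlers passed to handle_unaligned_access() so that the replayed
 * load/store goes through from_device()/to_device() instead of plain
 * memory accesses.
 */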
static struct mem_access trapped_io_access = {
        from_device,
        to_device,
};

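/*
 * Entry point from the fault path when a kernel access hits a trapped
 * window: fetch the faulting instruction and hand it to the unaligned
 * access emulator together with the trapped io handlers.  Returns 1 if
 * the access was emulated, 0 if the fault should be handled normally.
 */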
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
        mm_segment_t oldfs;
        insn_size_t instruction;
        int tmp;

        if (!lookup_tiop(address))
                return 0;

        WARN_ON(user_mode(regs));

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        if (copy_from_user(&instruction, (void *)(regs->pc),
                           sizeof(instruction))) {
                set_fs(oldfs);
                return 0;
        }

        tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
        set_fs(oldfs);
        return tmp == 0;
}