/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005  MIPS Technologies, Inc.  All rights reserved.
 *      Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>

#include <mangle-port.h>

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  On the other hand, the
 * values that raw operations work on may or may not have been swapped by
 * the bus hardware.  An example use would be for flash memory that's used
 * for execute in place.
 */
# define __raw_ioswabb(x)       (x)
# define __raw_ioswabw(x)       (x)
# define __raw_ioswabl(x)       (x)
# define __raw_ioswabq(x)       (x)

/*
 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
 * less sane hardware forces software to fiddle with this...
 */
#if defined(CONFIG_SWAP_IO_SPACE)

# define ioswabb(x)             (x)
# ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16-bit values in hardware, but
 * not 32-bit ones.  Go figure... Can't tell without documentation.
 */
#  define ioswabw(x)            (x)
# else
#  define ioswabw(x)            le16_to_cpu(x)
# endif
# define ioswabl(x)             le32_to_cpu(x)
# define ioswabq(x)             le64_to_cpu(x)

#else

# define ioswabb(x)             (x)
# define ioswabw(x)             (x)
# define ioswabl(x)             (x)
# define ioswabq(x)             (x)

#endif

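/*
 * Worked example of the swapping above (illustrative only): suppose a PCI
 * device register holds the 32-bit value 0x12345678 in little-endian byte
 * order.  On a big-endian CPU without hardware swapping, a raw load sees
 * the byte-reversed pattern 0x78563412; ioswabl(), which expands to
 * le32_to_cpu(), turns that back into 0x12345678.  Without
 * CONFIG_SWAP_IO_SPACE, or through the __raw_ accessors, the value is
 * used exactly as loaded (reg is a hypothetical mapped register):
 *
 *	u32 raw = __raw_readl(reg);	(bus byte order, no swapping)
 *	u32 val = readl(reg);		(swapped to CPU order if needed)
 */
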
/*
 * Native bus accesses are never swapped.
 */
#define bus_ioswabb(x)          (x)
#define bus_ioswabw(x)          (x)
#define bus_ioswabl(x)          (x)
#define bus_ioswabq(x)          (x)

#define __bus_ioswabq           bus_ioswabq

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base)  \
        do { * (unsigned long *) &mips_io_port_base = (base); } while (0)

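/*
 * Illustrative sketch (not from this header): platform setup code would
 * typically point the port base at an uncached window once, early during
 * boot.  The physical address below is a made-up placeholder;
 * CKSEG1ADDR() comes from <asm/addrspace.h> and yields an address whose
 * low 16 bits are zero, as required above:
 *
 *	set_io_port_base(CKSEG1ADDR(0x1fd00000));
 *
 * Note how set_io_port_base() casts away the const: the variable is
 * declared const so the compiler may treat it as a constant, yet the
 * platform can still assign it once.
 */
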
/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *              Linus
 *
 */

#define __SLOW_DOWN_IO \
        __asm__ __volatile__( \
                "sb\t$0,0x80(%0)" \
                : : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
 *
 *     The returned physical address is the physical (CPU) mapping for
 *     the memory address given.  It is only valid to use this function on
 *     addresses directly mapped or allocated via kmalloc.
 *
 *     This function does not give bus mappings for DMA transfers.  In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
        return (unsigned long)address - PAGE_OFFSET;
}

/*
 *     phys_to_virt    -       map physical address to virtual
 *     @address: address to remap
 *
 *     The returned virtual address is a current CPU mapping for
 *     the memory address given.  It is only valid to use this function on
 *     addresses that have a kernel mapping.
 *
 *     This function does not handle bus mappings for DMA transfers.  In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
        return (void *)(address + PAGE_OFFSET);
}

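/*
 * Illustrative sketch (not part of this header): the two helpers above
 * are inverses for directly-mapped kernel memory, e.g. a kmalloc()
 * buffer:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(pa) != buf);
 *
 * They must not be used on vmalloc() or ioremap() addresses, which do
 * not live in the direct PAGE_OFFSET mapping.
 */
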
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
        return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
        return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these busses.
 */
extern unsigned long isa_slot_offset;

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(volatile void __iomem *addr);

static inline void * __ioremap_mode(phys_t offset, unsigned long size,
        unsigned long flags)
{
        if (cpu_has_64bit_addresses) {
                u64 base = UNCAC_BASE;

                /*
                 * R10000 supports a 2-bit uncached attribute, therefore
                 * UNCAC_BASE may not equal IO_BASE.
                 */
                if (flags == _CACHE_UNCACHED)
                        base = (u64) IO_BASE;
                return (void *) (unsigned long) (base + offset);
        }

        return __ioremap(offset, size, flags);
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)                                           \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)

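/*
 * Illustrative sketch (not from this header): typical driver use of the
 * mapping interface.  The physical base address and register offset are
 * made-up placeholders for a hypothetical device:
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	writel(status | 1, regs + 0x10);
 *	iounmap(regs);
 *
 * Always access the cookie through the readl()/writel() helpers rather
 * than dereferencing it directly.
 */
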
/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)                                   \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * These two are MIPS-specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)                             \
        __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)                      \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(volatile void __iomem *addr)
{
        if (cpu_has_64bit_addresses)
                return;

        __iounmap(addr);
}

/*
 * __BUILD_MEMORY_SINGLE() generates a write##bwlq()/read##bwlq() accessor
 * pair.  A 64-bit access on a 32-bit kernel merges the two 32-bit
 * register halves in assembly and, if `irq' is set, runs with interrupts
 * disabled so the two-part access cannot be torn.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)                     \
                                                                        \
static inline void pfx##write##bwlq(type val,                           \
                                    volatile void __iomem *mem)         \
{                                                                       \
        volatile type *__mem;                                           \
        type __val;                                                     \
                                                                        \
        __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));    \
                                                                        \
        __val = pfx##ioswab##bwlq(val);                                 \
                                                                        \
        if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
                *__mem = __val;                                         \
        else if (cpu_has_64bits) {                                      \
                unsigned long __flags;                                  \
                type __tmp;                                             \
                                                                        \
                if (irq)                                                \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
                        ".set   mips3"          "\t\t# __writeq""\n\t"  \
                        "dsll32 %L0, %L0, 0"                    "\n\t"  \
                        "dsrl32 %L0, %L0, 0"                    "\n\t"  \
                        "dsll32 %M0, %M0, 0"                    "\n\t"  \
                        "or     %L0, %L0, %M0"                  "\n\t"  \
                        "sd     %L0, %2"                        "\n\t"  \
                        ".set   mips0"                          "\n"    \
                        : "=r" (__tmp)                                  \
                        : "0" (__val), "m" (*__mem));                   \
                if (irq)                                                \
                        local_irq_restore(__flags);                     \
        } else                                                          \
                BUG();                                                  \
}                                                                       \
                                                                        \
static inline type pfx##read##bwlq(volatile void __iomem *mem)          \
{                                                                       \
        volatile type *__mem;                                           \
        type __val;                                                     \
                                                                        \
        __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));    \
                                                                        \
        if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
                __val = *__mem;                                         \
        else if (cpu_has_64bits) {                                      \
                unsigned long __flags;                                  \
                                                                        \
                if (irq)                                                \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
                        ".set   mips3"          "\t\t# __readq" "\n\t"  \
                        "ld     %L0, %1"                        "\n\t"  \
                        "dsra32 %M0, %L0, 0"                    "\n\t"  \
                        "sll    %L0, %L0, 0"                    "\n\t"  \
                        ".set   mips0"                          "\n"    \
                        : "=r" (__val)                                  \
                        : "m" (*__mem));                                \
                if (irq)                                                \
                        local_irq_restore(__flags);                     \
        } else {                                                        \
                __val = 0;                                              \
                BUG();                                                  \
        }                                                               \
                                                                        \
        return pfx##ioswab##bwlq(__val);                                \
}

/*
 * __BUILD_IOPORT_SINGLE() generates an out##bwlq()/in##bwlq() accessor
 * pair for port I/O relative to mips_io_port_base.  The `slow' argument
 * optionally inserts SLOW_DOWN_IO for the *_p variants; 64-bit port
 * accesses are not supported.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)                 \
                                                                        \
static inline void pfx##out##bwlq##p(type val, unsigned long port)      \
{                                                                       \
        volatile type *__addr;                                          \
        type __val;                                                     \
                                                                        \
        port = __swizzle_addr_##bwlq(port);                             \
        __addr = (void *)(mips_io_port_base + port);                    \
                                                                        \
        __val = pfx##ioswab##bwlq(val);                                 \
                                                                        \
        if (sizeof(type) != sizeof(u64)) {                              \
                *__addr = __val;                                        \
                slow;                                                   \
        } else                                                          \
                BUILD_BUG();                                            \
}                                                                       \
                                                                        \
static inline type pfx##in##bwlq##p(unsigned long port)                 \
{                                                                       \
        volatile type *__addr;                                          \
        type __val;                                                     \
                                                                        \
        port = __swizzle_addr_##bwlq(port);                             \
        __addr = (void *)(mips_io_port_base + port);                    \
                                                                        \
        if (sizeof(type) != sizeof(u64)) {                              \
                __val = *__addr;                                        \
                slow;                                                   \
        } else {                                                        \
                __val = 0;                                              \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        return pfx##ioswab##bwlq(__val);                                \
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)                             \
                                                                        \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define __BUILD_IOPORT_PFX(bus, bwlq, type)                             \
                                                                        \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)                               \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO(bwlq, type)                                             \
                                                                        \
__BUILD_MEMORY_PFX(, bwlq, type)                                        \
__BUILD_MEMORY_PFX(__raw_, bwlq, type)                                  \
__BUILD_MEMORY_PFX(bus_, bwlq, type)                                    \
__BUILD_IOPORT_PFX(, bwlq, type)                                        \
__BUILD_IOPORT_PFX(__raw_, bwlq, type)

#define __BUILDIO(bwlq, type)                                           \
                                                                        \
__BUILD_MEMORY_SINGLE(__bus_, bwlq, type, 0)

BUILDIO(b, u8)
BUILDIO(w, u16)
BUILDIO(l, u32)
BUILDIO(q, u64)

__BUILDIO(q, u64)

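/*
 * The invocations above provide, for each width b/w/l/q (u8/u16/u32/u64):
 * readX()/writeX(), __raw_readX()/__raw_writeX() and bus_ variants for
 * memory-mapped I/O, plus inX()/outX(), inX_p()/outX_p() and __raw_ port
 * accessors; __BUILDIO() additionally generates __bus_readq()/
 * __bus_writeq() without the interrupt protection.  A minimal usage
 * sketch (the port numbers are made-up placeholders for a hypothetical
 * ISA device):
 *
 *	u8 val;
 *
 *	outb(0x0a, 0x3f2);		(write a command byte to a port)
 *	val = inb_p(0x3f4);		(read with extra SLOW_DOWN_IO delay)
 */
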
#define readb_relaxed                   readb
#define readw_relaxed                   readw
#define readl_relaxed                   readl
#define readq_relaxed                   readq

/*
 * Some code tests for these symbols
 */
#define readq                           readq
#define writeq                          writeq

/*
 * __BUILD_MEMORY_STRING() and __BUILD_IOPORT_STRING() generate the
 * repeated-transfer helpers: readsX()/writesX() move `count' items
 * between a memory buffer and a single MMIO location, and insX()/outsX()
 * do the same for a port.  All of them use the __raw_ accessors, so data
 * is transferred in bus byte order, without swapping.
 */
#define __BUILD_MEMORY_STRING(bwlq, type)                               \
                                                                        \
static inline void writes##bwlq(volatile void __iomem *mem, void *addr, \
                                unsigned int count)                     \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                __raw_write##bwlq(*__addr, mem);                        \
                __addr++;                                               \
        }                                                               \
}                                                                       \
                                                                        \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,  \
                               unsigned int count)                      \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                *__addr = __raw_read##bwlq(mem);                        \
                __addr++;                                               \
        }                                                               \
}

#define __BUILD_IOPORT_STRING(bwlq, type)                               \
                                                                        \
static inline void outs##bwlq(unsigned long port, void *addr,           \
                              unsigned int count)                       \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                __raw_out##bwlq(*__addr, port);                         \
                __addr++;                                               \
        }                                                               \
}                                                                       \
                                                                        \
static inline void ins##bwlq(unsigned long port, void *addr,            \
                             unsigned int count)                        \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                *__addr = __raw_in##bwlq(port);                         \
                __addr++;                                               \
        }                                                               \
}

#define BUILDSTRING(bwlq, type)                                         \
                                                                        \
__BUILD_MEMORY_STRING(bwlq, type)                                       \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
BUILDSTRING(q, u64)

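/*
 * Illustrative sketch (not from this header): draining a 16-bit data
 * FIFO into a buffer, as an IDE-style driver might.  The MMIO cookie
 * `data_reg' and the count are hypothetical:
 *
 *	u16 buf[256];
 *
 *	readsw(data_reg, buf, 256);	(256 halfwords from one register)
 *	writesw(data_reg, buf, 256);	(and back out again)
 */
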

/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")

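/*
 * Illustrative sketch (not from this header): mmiowb() orders MMIO
 * writes against a subsequent spin_unlock(), so a write issued under the
 * lock on one CPU cannot be overtaken by a write from the next lock
 * holder.  `dev_lock', `regs' and CMD are hypothetical:
 *
 *	spin_lock(&dev_lock);
 *	writel(cmd, regs + CMD);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 */
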
#define memset_io(a,b,c)        memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)    memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)      memcpy((void *)(a),(b),(c))

/*
 * Memory Mapped I/O
 */
#define ioread8(addr)           readb(addr)
#define ioread16(addr)          readw(addr)
#define ioread32(addr)          readl(addr)

#define iowrite8(b,addr)        writeb(b,addr)
#define iowrite16(w,addr)       writew(w,addr)
#define iowrite32(l,addr)       writel(l,addr)

#define ioread8_rep(a,b,c)      readsb(a,b,c)
#define ioread16_rep(a,b,c)     readsw(a,b,c)
#define ioread32_rep(a,b,c)     readsl(a,b,c)

#define iowrite8_rep(a,b,c)     writesb(a,b,c)
#define iowrite16_rep(a,b,c)    writesw(a,b,c)
#define iowrite32_rep(a,b,c)    writesl(a,b,c)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

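/*
 * Illustrative sketch (not from this header): the iomap API lets a
 * driver treat port and memory BARs uniformly.  The BAR number and
 * register offset are made-up placeholders; a max length of 0 maps the
 * whole BAR:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	iowrite32(0x1, base + 0x04);
 *	(void) ioread32(base + 0x04);	(flush the posted write)
 *	pci_iounmap(pdev, base);
 */
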
/*
 * ISA space is 'always mapped' on currently supported MIPS systems;
 * there is no need to explicitly ioremap() it.  The fact that the ISA IO
 * space is mapped to PAGE_OFFSET is pure coincidence - it does not mean
 * ISA values are physical addresses.  The following constant pointer can
 * be used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a)            readb(__ISA_IO_base + (a))
#define isa_readw(a)            readw(__ISA_IO_base + (a))
#define isa_readl(a)            readl(__ISA_IO_base + (a))
#define isa_readq(a)            readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)         writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)         writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)         writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)         writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)    memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)  memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

/*
 *     check_signature         -       find BIOS signatures
 *     @io_addr: mmio address to check
 *     @signature:  signature block
 *     @length: length of signature
 *
 *     Perform a signature comparison with the mmio address io_addr.  This
 *     address should have been obtained by ioremap.
 *     Returns 1 on a match.
 */
static inline int check_signature(char __iomem *io_addr,
        const unsigned char *signature, int length)
{
        int retval = 0;
        do {
                if (readb(io_addr) != *signature)
                        goto out;
                io_addr++;
                signature++;
                length--;
        } while (length);
        retval = 1;
out:
        return retval;
}

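/*
 * Illustrative sketch (not from this header): probing a mapped ROM area
 * for a signature string.  The address and the signature are made-up
 * placeholders:
 *
 *	static const unsigned char sig[] = "PCIR";
 *	char __iomem *rom = ioremap(0x000c0000, 0x100);
 *
 *	if (rom && check_signature(rom, sig, 4))
 *		... found it ...
 */
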
/*
 * The caches on some architectures aren't dma-coherent, so cache
 * coherency has to be handled in software.  There are three types of
 * operations that can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    It also invalidates the affected part of the caches as necessary
 *    before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to outside.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)        _dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)            _dma_cache_wback(start,size)
#define dma_cache_inv(start, size)              _dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
        do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)     \
        do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)       \
        do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

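/*
 * Illustrative sketch (not from this header): a driver on a non-coherent
 * platform brackets DMA transfers with these operations.  `addr' and
 * `len' describe a hypothetical DMA buffer, with addr as an unsigned
 * long kernel address:
 *
 *	dma_cache_wback(addr, len);	(before memory-to-device DMA)
 *	dma_cache_inv(addr, len);	(before device-to-memory DMA)
 */
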
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking; just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)    (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

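/*
 * Worked example (illustrative only): the 32 significant bits of an
 * 8-byte-wide CSR at address A live in the least-significant half of the
 * 64-bit word.  On a big-endian kernel that is bytes A+4..A+7, hence the
 * +4 adjustment; on little-endian it is bytes A+0..A+3 and no adjustment
 * is needed.  `csr_base' and the offset are hypothetical (and keep the
 * 8-byte alignment assumed above):
 *
 *	u32 v = csr_in32(csr_base + 0x40);
 *	csr_out32(v | 0x1, csr_base + 0x40);
 */
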
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)   p

#endif /* _ASM_IO_H */