/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

/*
 * Per-device coherent memory pool, as attached to dev->dma_mem by
 * dma_declare_coherent_memory().
 */
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

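/*
 * Illustrative usage, a sketch and not part of the original file: a
 * driver would typically obtain a coherent buffer as below. "dev",
 * the buffer size and the error handling are assumptions made for
 * the example.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	// hand ring_dma to the device, access ring from the CPU ...
 *
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
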
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (!dma_release_from_coherent(dev, order, vaddr)) {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

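/*
 * Illustrative usage, a sketch and not part of the original file:
 * before a device reads from a kernel buffer, write back the CPU
 * cache; after a device has written into one, invalidate it. "dev",
 * "buf" and "len" are assumptions made for the example.
 *
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);	// writeback
 *	// ... device reads buf ...
 *	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);	// invalidate
 */
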
/*
 * Dummy handler: the actual parsing is done later by
 * memchunk_cmdline_override(), this just keeps "memchunk.*" options
 * from being reported as unknown boot parameters.
 */
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

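/*
 * Illustrative boot-line usage, a sketch and not part of the original
 * file: a chunk registered under the hypothetical name "veu0" could
 * be resized from the kernel command line with
 *
 *	memchunk.veu0=2m
 *
 * memparse() understands the usual k/m/g suffixes, and forcing a size
 * of 0 makes platform_resource_setup_memory() below skip the
 * allocation entirely.
 */
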
/*
 * Allocate a coherent buffer of memsize bytes (optionally overridden
 * via "memchunk.<name>=" on the kernel command line) and plug it into
 * the last, still-unused resource slot of pdev.
 */
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
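
/*
 * Illustrative board-code usage, a sketch and not part of the original
 * file: back the trailing, zero-initialized resource slot of a platform
 * device with 1 MiB of coherent memory. The device name, register
 * window and addresses are made up for the example.
 *
 *	static struct resource example_resources[] = {
 *		[0] = {
 *			.start	= 0xfe900000,
 *			.end	= 0xfe9000ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			// left empty; filled in by the call below
 *		},
 *	};
 *
 *	static struct platform_device example_device = {
 *		.name		= "example",
 *		.num_resources	= ARRAY_SIZE(example_resources),
 *		.resource	= example_resources,
 *	};
 *
 *	platform_resource_setup_memory(&example_device, "example", 1 << 20);
 */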