/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"		/* map_page() */
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
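
/*
 * Example (illustrative values): with 4K pages, an address equal to
 * CONSISTENT_BASE + 0x3000 gives CONSISTENT_OFFSET() == 3, i.e. the
 * fourth page of the consistent region.
 */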
/*
 * Lock protecting allocations from the uncached, DMA consistent region
 * defined above.
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
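
/*
 * Usage sketch (hypothetical, not part of this file): carving a
 * two-page region out of the consistent space looks like
 *
 *	struct ppc_vm_region *r;
 *
 *	r = ppc_vm_region_alloc(&consistent_head, 2 * PAGE_SIZE, GFP_KERNEL);
 *	if (r)
 *		... map pages into [r->vm_start, r->vm_end) ...
 *
 * The list stays sorted by address and the first gap large enough for
 * 'size' wins, so a region can later be found by its vm_start alone.
 */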
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (!mask) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;
	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}
	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}
	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
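
/*
 * Caller's view (hypothetical driver code; on CONFIG_NOT_COHERENT_CACHE
 * platforms the generic dma_alloc_coherent() ends up here):
 *
 *	dma_addr_t bus;
 *	void *ring;
 *
 *	ring = __dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * 'ring' lies in [CONSISTENT_BASE, CONSISTENT_END) and is mapped
 * non-cached; 'bus' is the physical address to program into the device.
 */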
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}
	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);
	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
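
/*
 * The matching teardown for the allocation sketch above (hypothetical):
 *
 *	__dma_free_coherent(PAGE_SIZE, ring);
 *
 * The size must match the one used at allocation time; on a mismatch
 * the region's real size is reported and used instead.
 */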
/*
 * Make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
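
/*
 * Example (hypothetical streaming buffer): write back dirty cache lines
 * before the device reads the buffer, and invalidate stale lines before
 * the CPU reads what the device wrote:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *	... device DMA runs ...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);
 */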
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
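
/*
 * Worked example for the segment arithmetic above (illustrative numbers):
 * with PAGE_SIZE = 4K, offset = 0x800 and size = 0x1800, the first
 * segment spans the 0x800 bytes up to the end of the first page, so
 * nr_segs = 1 + (0x1000 + 0xfff) / 0x1000 = 2, and the second segment
 * covers the remaining 0x1000 bytes at offset 0 of the next page.
 */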
/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(),
 * but takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
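
/*
 * Caller's view (hypothetical): the streaming DMA path would sync a
 * page-based buffer, possibly in highmem, before device access:
 *
 *	__dma_sync_page(page, offset, len, DMA_TO_DEVICE);
 *
 * On CONFIG_HIGHMEM this routes through the kmap_atomic()-based helper
 * above; otherwise page_address() provides the virtual address directly.
 */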