/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The 'page' arguments of these two macros are in Emu pages (4096 bytes),
 * not in the PAGE_SIZE-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
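
/*
 * Note on the entry format, as implied by the macro above: addr is
 * EMUPAGESIZE-aligned, so shifting it left by one leaves the low bits
 * free, and the entry's own page index is stored there.  For example,
 * addr 0x1234000 and page 5 yield the little-endian entry
 * (0x1234000 << 1) | 5 == 0x2468005.
 */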

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
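
/*
 * Illustration (not used by the code): with PAGE_SIZE == 8192 and
 * EMUPAGESIZE == 4096, UNIT_PAGES is 2, so each aligned page covers two
 * consecutive PTB entries and MAX_ALIGN_PAGES is half of MAXPAGES.
 */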

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment addr; every entry points at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */

/* forward declarations */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1 part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the list
 * position of the next mapped block in *nextp;
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit */
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
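
/*
 * Worked example of the return value above: with block A mapped at pages
 * 0..3, B at 10..15 and C starting at page 20, unmapping B leaves the hole
 * 4..19, so unmap_memblk() returns 20 - 4 = 16 pages.
 */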

/*
 * search for empty pages of the given size, and create a new memory block
 *
 * unlike synth_alloc, the memory block is aligned to the start of a page
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)
		__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
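
/*
 * Note: mapped_order_link_head effectively forms an LRU queue.  A remapped
 * block is moved to the tail in snd_emu10k1_memblk_map() above, and the
 * eviction loop scans from the head, so the least recently mapped blocks
 * are unmapped first.
 */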

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses; the page pointers are not stored, so that
	 * these pages are not freed in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
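
/*
 * A minimal usage sketch from a PCM hw_params callback (assuming the buffer
 * was set up as an SG buffer, and a hypothetical driver-private 'memblk'
 * field for storing the result):
 *
 *	if (epcm->memblk == NULL)
 *		epcm->memblk = snd_emu10k1_alloc_pages(emu, substream);
 *	if (epcm->memblk == NULL)
 *		return -ENOMEM;
 */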

/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
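
/*
 * Usage sketch (hypothetical caller, e.g. a sample loader; 'buf' is a
 * user-space pointer and 'size' the sample size in bytes):
 *
 *	blk = snd_emu10k1_synth_alloc(emu, size);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, size) < 0) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */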

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check the new allocation range, skipping pages shared with the
 * neighboring blocks */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;	/* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;	/* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}

/* release the allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}
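
/*
 * Note on the fallback above: the pfn test rejects a page whose frame
 * number has bits set outside dma_mask >> PAGE_SHIFT, i.e. a page the
 * device cannot address; GFP_DMA (<16MB) is then a conservative second
 * attempt.
 */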

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (ptr == NULL) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
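
/*
 * The helpers below walk the block page by page, since the underlying
 * pages are not virtually contiguous.  Worked example (assuming
 * PAGE_SIZE == 4096): offset 0xff0 with size 0x20 is split into 0x10
 * bytes at the end of one page and 0x10 bytes at the start of the next.
 */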

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);