/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *  Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
 *
 *  Trident 4DWave-NX memory page allocation (TLB area)
 *  Trident chip can handle only 16MByte of the memory at the same time.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/trident.h>

/* page arguments of these two macros are Trident page (4096 bytes), not like
 * aligned pages in others
 */
#define __set_tlb_bus(trident,page,ptr,addr) \
	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
#define __tlb_to_ptr(trident,page) \
	(void*)((trident)->tlb.shadow_entries[page])
#define __tlb_to_addr(trident,page) \
	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
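
/*
 * Each TLB slot is kept in two parallel arrays: tlb.entries holds the
 * little-endian bus address that the chip dereferences, while
 * tlb.shadow_entries holds the matching kernel virtual pointer so the
 * driver can reach the same page from the CPU side.  __set_tlb_bus()
 * updates both views; __tlb_to_ptr() and __tlb_to_addr() read them back.
 */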

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
#define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES	/* maximum aligned pages */
/* fill TLB entries corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill TLB entries corresponding to page with silence pointer */
#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
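
/*
 * Example of the arithmetic above (4 kB pages): a buffer offset of 0x3420
 * falls into aligned page get_aligned_page(0x3420) = 3, which starts at
 * aligned_page_offset(3) = 0x3000; the remaining 0x420 bytes are the
 * offset within that page, and TLB entry 3 maps it directly.
 */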

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE		PAGE_SIZE
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset)	((offset) >> 13)
#define aligned_page_offset(page)	((page) << 13)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	page <<= 1;
	__set_tlb_bus(trident, page, ptr, addr);
	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE,
		      addr + SNDRV_TRIDENT_PAGE_SIZE);
}

static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	page <<= 1;
	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area,
		      trident->tlb.silent_page.addr);
	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area,
		      trident->tlb.silent_page.addr);
}

#else
/* arbitrary size */
#define UNIT_PAGES		(PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE		(SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if the alignment doesn't match the maximum size, the last few blocks
 * become unusable.  To use such blocks, you'll need to check the validity
 * of the accessed page in set_tlb_bus and set_silent_tlb.  search_empty()
 * should check it, too.
 */
#define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_tlb_bus(trident, page, ptr, addr);
		ptr += SNDRV_TRIDENT_PAGE_SIZE;
		addr += SNDRV_TRIDENT_PAGE_SIZE;
	}
}

static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		__set_tlb_bus(trident, page,
			      (unsigned long)trident->tlb.silent_page.area,
			      trident->tlb.silent_page.addr);
}

#endif /* PAGE_SIZE */
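
/*
 * Example for the generic case above: with 16 kB kernel pages,
 * UNIT_PAGES = 16384 / 4096 = 4, so aligned page 3 starts at byte offset
 * 3 * 16384 = 0xC000 and set_tlb_bus() fills the four Trident TLB entries
 * 12..15, each advanced by SNDRV_TRIDENT_PAGE_SIZE.
 */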

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_trident *trident, int offset)
{
	char *ptr;

	ptr = page_to_ptr(trident, get_aligned_page(offset));
	ptr += offset % ALIGN_PAGE_SIZE;
	return (void *)ptr;
}

/* first and last (aligned) pages of memory block */
#define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

/*
 * search empty pages which may contain given size
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk, *prev;
	int page, psize;
	struct list_head *p;

	psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
	prev = NULL;
	page = 0;
	list_for_each(p, &hdr->block) {
		blk = list_entry(p, struct snd_util_memblk, list);
		if (page + psize <= firstpg(blk))
			goto __found_pages;
		page = lastpg(blk) + 1;
	}
	if (page + psize > MAX_ALIGN_PAGES)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
	if (blk == NULL)
		return NULL;
	blk->offset = aligned_page_offset(page);	/* set aligned offset */
	firstpg(blk) = page;
	lastpg(blk) = page + psize - 1;
	return blk;
}
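
/*
 * Sizing example for search_empty(): with 4 kB aligned pages, a request
 * of 10000 bytes gives psize = get_aligned_page(10000 + 4095) = 3, so the
 * first-fit scan above looks for a gap of three aligned pages between
 * existing blocks (or past the last block, up to MAX_ALIGN_PAGES).
 */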

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(unsigned long ptr)
{
	if (ptr & ~0x3fffffffUL) {
		snd_printk(KERN_ERR "max memory size is 1GB!!\n");
		return 0;
	}
	if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
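
/*
 * Note on the checks above: a page must start on a SNDRV_TRIDENT_PAGE_SIZE
 * boundary because the low bits of each TLB entry are masked off, and bus
 * addresses at or above 1 GB are rejected since the chip apparently cannot
 * reach memory beyond that range.
 */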

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
			   struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int idx, page;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);

	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
	hdr = trident->tlb.memhdr;
	snd_assert(hdr != NULL, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (lastpg(blk) - firstpg(blk) >= sgbuf->pages) {
		snd_printk(KERN_ERR "page calculation doesn't match: allocated pages = %d, trident = %d/%d\n",
			   sgbuf->pages, firstpg(blk), lastpg(blk));
		__snd_util_mem_free(hdr, blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	idx = 0;
	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
		dma_addr_t addr = sgbuf->table[idx].addr;
		unsigned long ptr = (unsigned long)sgbuf->table[idx].buf;
		if (! is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
			     struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	int page;
	struct snd_pcm_runtime *runtime = substream->runtime;
	dma_addr_t addr;
	unsigned long ptr;

	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
	hdr = trident->tlb.memhdr;
	snd_assert(hdr != NULL, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	addr = runtime->dma_addr;
	ptr = (unsigned long)runtime->dma_area;
	for (page = firstpg(blk); page <= lastpg(blk); page++,
	     ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
		if (! is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
			struct snd_pcm_substream *substream)
{
	snd_assert(trident != NULL, return NULL);
	snd_assert(substream != NULL, return NULL);
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
		return snd_trident_alloc_sg_pages(trident, substream);
	else
		return snd_trident_alloc_cont_pages(trident, substream);
}
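
/*
 * Rough usage sketch (the actual callers live in the main driver, e.g. the
 * PCM hw_params/hw_free callbacks): allocate once the DMA buffer is set up,
 * keep the returned block, and hand it back to snd_trident_free_pages()
 * when the buffer is torn down:
 *
 *	blk = snd_trident_alloc_pages(trident, substream);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	...
 *	snd_trident_free_pages(trident, blk);
 */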

/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
			   struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr;
	int page;

	snd_assert(trident != NULL, return -EINVAL);
	snd_assert(blk != NULL, return -EINVAL);

	hdr = trident->tlb.memhdr;
	mutex_lock(&hdr->block_mutex);
	/* reset TLB entries */
	for (page = firstpg(blk); page <= lastpg(blk); page++)
		set_silent_tlb(trident, page);
	/* free memory block */
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

/*----------------------------------------------------------------
 * memory allocation using multiple pages (for synth)
 *----------------------------------------------------------------
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 *----------------------------------------------------------------*/

static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk);
static int synth_free_pages(struct snd_trident *hw, struct snd_util_memblk *blk);

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_trident_synth_alloc(struct snd_trident *hw, unsigned int size)
{
	struct snd_util_memblk *blk;
	struct snd_util_memhdr *hdr = hw->tlb.memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = __snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

EXPORT_SYMBOL(snd_trident_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_trident_synth_free(struct snd_trident *hw, struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr = hw->tlb.memhdr;

	mutex_lock(&hdr->block_mutex);
	synth_free_pages(hw, blk);
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_trident_synth_free);

/*
 * reset TLB entry and free kernel page
 */
static void clear_tlb(struct snd_trident *trident, int page)
{
	void *ptr = page_to_ptr(trident, page);
	dma_addr_t addr = page_to_addr(trident, page);
	set_silent_tlb(trident, page);
	if (ptr) {
		struct snd_dma_buffer dmab;
		dmab.dev.type = SNDRV_DMA_TYPE_DEV;
		dmab.dev.dev = snd_dma_pci_data(trident->pci);
		dmab.area = ptr;
		dmab.addr = addr;
		dmab.bytes = ALIGN_PAGE_SIZE;
		snd_dma_free_pages(&dmab);
	}
}
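
/*
 * clear_tlb() first points the TLB slot back at the shared silent page, so
 * the hardware never sees a stale entry, and only then returns the backing
 * kernel page to the DMA allocator via snd_dma_free_pages().
 */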

/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_util_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_util_memblk *q;
	int first_page, last_page;
	first_page = firstpg(blk);
	if ((p = blk->list.prev) != &hdr->block) {
		q = list_entry(p, struct snd_util_memblk, list);
		if (lastpg(q) == first_page)
			first_page++;	/* first page was already allocated */
	}
	last_page = lastpg(blk);
	if ((p = blk->list.next) != &hdr->block) {
		q = list_entry(p, struct snd_util_memblk, list);
		if (firstpg(q) == last_page)
			last_page--;	/* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
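
/*
 * Example of the boundary handling above: if the previous block ends in
 * aligned page 7 and this block spans pages 7..10, the shared page 7 is
 * already backed by the neighbour, so the range returned for fresh
 * allocation (and later for freeing) is only pages 8..10.
 */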

/*
 * allocate kernel pages and assign them to TLB
 */
static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	firstpg(blk) = get_aligned_page(blk->offset);
	lastpg(blk) = get_aligned_page(blk->offset + blk->size - 1);
	get_single_page_range(hw->tlb.memhdr, blk, &first_page, &last_page);

	/* allocate a kernel page for each Trident page -
	 * fortunately Trident page size and kernel PAGE_SIZE are identical!
	 */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(hw->pci),
					ALIGN_PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		set_tlb_bus(hw, page, (unsigned long)dmab.area, dmab.addr);
	}
	return 0;

__fail:
	/* release allocated pages */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++)
		clear_tlb(hw, page);

	return -ENOMEM;
}

static int synth_free_pages(struct snd_trident *trident, struct snd_util_memblk *blk)
{
	int page, first_page, last_page;

	get_single_page_range(trident->tlb.memhdr, blk, &first_page, &last_page);
	for (page = first_page; page <= last_page; page++)
		clear_tlb(trident, page);

	return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_trident_synth_copy_from_user(struct snd_trident *trident,
				     struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;

	offset += blk->offset;
	end_offset = offset + size;
	page = get_aligned_page(offset) + 1;
	do {
		nextofs = aligned_page_offset(page);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		if (copy_from_user(offset_ptr(trident, offset), data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_trident_synth_copy_from_user);
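
/*
 * Rough usage sketch of the synth-side API (the actual caller is the
 * wavetable/synth code): reserve an area, copy the sample data into it
 * page by page, and release it when the sample is unloaded:
 *
 *	blk = snd_trident_synth_alloc(trident, sample_size);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_trident_synth_copy_from_user(trident, blk, 0, buf, sample_size))
 *		err = -EFAULT;
 *	...
 *	snd_trident_synth_free(trident, blk);
 */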