/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
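/*
 * Allocates the fifo structure and an event cell pool of "poolsize"
 * cells; returns NULL if the allocation or the pool initialization fails.
 */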
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
        struct snd_seq_fifo *f;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (f == NULL) {
                snd_printd("malloc failed for snd_seq_fifo_new()\n");
                return NULL;
        }

        f->pool = snd_seq_pool_new(poolsize);
        if (f->pool == NULL) {
                kfree(f);
                return NULL;
        }
        if (snd_seq_pool_init(f->pool) < 0) {
                snd_seq_pool_delete(&f->pool);
                kfree(f);
                return NULL;
        }

        spin_lock_init(&f->lock);
        snd_use_lock_init(&f->use_lock);
        init_waitqueue_head(&f->input_sleep);
        atomic_set(&f->overflow, 0);

        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;

        return f;
}

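/*
 * Release a fifo: clear any queued events, wake up sleeping readers,
 * destroy the cell pool and free the structure itself.  The caller's
 * pointer is cleared before the cleanup starts.
 */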
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
        struct snd_seq_fifo *f;

        snd_assert(fifo != NULL, return);
        f = *fifo;
        snd_assert(f != NULL, return);
        *fifo = NULL;

        snd_seq_fifo_clear(f);

        /* wake up clients if any */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        /* release resources...*/
        /*....................*/

        if (f->pool) {
                snd_seq_pool_done(f->pool);
                snd_seq_pool_delete(&f->pool);
        }

        kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
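/*
 * Remove all queued events: wait until concurrent users of the fifo
 * have finished (snd_use_lock_sync), then free every cell back to the
 * pool under the fifo lock.  The overflow flag is reset as well.
 */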
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;

        /* clear overflow flag */
        atomic_set(&f->overflow, 0);

        snd_use_lock_sync(&f->use_lock);
        spin_lock_irqsave(&f->lock, flags);
        /* drain the fifo */
        while ((cell = fifo_cell_out(f)) != NULL) {
                snd_seq_cell_free(cell);
        }
        spin_unlock_irqrestore(&f->lock, flags);
}


/* enqueue event to fifo */
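/*
 * The event is duplicated into a cell taken from the fifo's pool without
 * blocking; the cell is appended at the tail and any sleeping reader is
 * woken up.  If the pool is exhausted, the overflow counter is bumped
 * and -ENOMEM is returned to the caller.
 */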
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
                          struct snd_seq_event *event)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err;

        snd_assert(f != NULL, return -EINVAL);

        snd_use_lock_use(&f->use_lock);
        err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
        if (err < 0) {
                if (err == -ENOMEM)
                        atomic_inc(&f->overflow);
                snd_use_lock_free(&f->use_lock);
                return err;
        }

        /* append new cells to fifo */
        spin_lock_irqsave(&f->lock, flags);
        if (f->tail != NULL)
                f->tail->next = cell;
        f->tail = cell;
        if (f->head == NULL)
                f->head = cell;
        f->cells++;
        spin_unlock_irqrestore(&f->lock, flags);

        /* wakeup client */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        snd_use_lock_free(&f->use_lock);

        return 0; /* success */

}

/* dequeue cell from fifo */
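/* Unlink and return the cell at the head of the fifo, or NULL when the
 * fifo is empty.  Must be called with f->lock held.
 */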
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;

        if ((cell = f->head) != NULL) {
                f->head = cell->next;

                /* reset tail if this was the last element */
                if (f->tail == cell)
                        f->tail = NULL;

                cell->next = NULL;
                f->cells--;
        }

        return cell;
}

/* dequeue cell from fifo and copy to user space */
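/*
 * If the fifo is empty and nonblock is set, -EAGAIN is returned right
 * away; otherwise the caller sleeps interruptibly on input_sleep until
 * an event arrives (or -ERESTARTSYS if a signal is pending).  On success
 * the dequeued cell is returned through *cellp.
 */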
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
                          struct snd_seq_event_cell **cellp, int nonblock)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        wait_queue_t wait;

        snd_assert(f != NULL, return -EINVAL);

        *cellp = NULL;
        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&f->lock, flags);
        while ((cell = fifo_cell_out(f)) == NULL) {
                if (nonblock) {
                        /* non-blocking - return immediately */
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -EAGAIN;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&f->input_sleep, &wait);
                spin_unlock_irq(&f->lock);
                schedule();
                spin_lock_irq(&f->lock);
                remove_wait_queue(&f->input_sleep, &wait);
                if (signal_pending(current)) {
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -ERESTARTSYS;
                }
        }
        spin_unlock_irqrestore(&f->lock, flags);
        *cellp = cell;

        return 0;
}


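/*
 * Push a cell back onto the head of the fifo so it will be returned
 * again by the next read (e.g. when the caller could not process it).
 */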
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
                               struct snd_seq_event_cell *cell)
{
        unsigned long flags;

        if (cell) {
                spin_lock_irqsave(&f->lock, flags);
                cell->next = f->head;
                f->head = cell;
                f->cells++;
                spin_unlock_irqrestore(&f->lock, flags);
        }
}


/* polling; return non-zero if queue is available */
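/* Adds input_sleep to the poll table, so the wake_up() issued by
 * snd_seq_fifo_event_in() also wakes tasks polling on the fifo.
 */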
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &f->input_sleep, wait);
        return (f->cells > 0);
}

/* change the size of pool; all old events are removed */
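/*
 * A new pool is allocated and initialized first; only then is it swapped
 * in under the fifo lock, so the fifo is left untouched if the allocation
 * fails.  Cells still queued in the old pool are freed and the old pool
 * is deleted afterwards, outside the lock.
 */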
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
        unsigned long flags;
        struct snd_seq_pool *newpool, *oldpool;
        struct snd_seq_event_cell *cell, *next, *oldhead;

        snd_assert(f != NULL && f->pool != NULL, return -EINVAL);

        /* allocate new pool */
        newpool = snd_seq_pool_new(poolsize);
        if (newpool == NULL)
                return -ENOMEM;
        if (snd_seq_pool_init(newpool) < 0) {
                snd_seq_pool_delete(&newpool);
                return -ENOMEM;
        }

        spin_lock_irqsave(&f->lock, flags);
        /* remember old pool */
        oldpool = f->pool;
        oldhead = f->head;
        /* exchange pools */
        f->pool = newpool;
        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;
        /* NOTE: overflow flag is not cleared */
        spin_unlock_irqrestore(&f->lock, flags);

        /* release cells in old pool */
        for (cell = oldhead; cell; cell = next) {
                next = cell->next;
                snd_seq_cell_free(cell);
        }
        snd_seq_pool_delete(&oldpool);

        return 0;
}