/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");

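/*
 * Locking helpers for the per-list "ops" operations.  Lists created without
 * SNDRV_SEQ_INSTR_FLG_DIRECT take the ops_lock spinlock (the saved IRQ state
 * lives in list->ops_flags, so lock and unlock must happen in the same
 * context); lists flagged as direct use the sleeping ops_mutex instead,
 * presumably because their handlers may block while moving instrument data.
 */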
static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_lock_irqsave(&list->ops_lock, list->ops_flags);
	} else {
		mutex_lock(&list->ops_mutex);
	}
}

static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
	} else {
		mutex_unlock(&list->ops_mutex);
	}
}

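/*
 * Instrument allocation.  Each struct snd_seq_kinstr is allocated together
 * with add_len extra bytes for the type-specific body (reached through
 * KINSTR_DATA()); GFP_ATOMIC is used when the caller runs in atomic context.
 * Freeing gives the owning ops a chance to release that body through its
 * remove() callback first.
 */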
static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
{
	struct snd_seq_kinstr *instr;

	instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len,
			atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (instr == NULL)
		return NULL;
	instr->add_len = add_len;
	return instr;
}

static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
{
	int result = 0;

	if (instr == NULL)
		return -EINVAL;
	if (instr->ops && instr->ops->remove)
		result = instr->ops->remove(instr->ops->private_data, instr, 1);
	if (!result)
		kfree(instr);
	return result;
}

struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
{
	struct snd_seq_kinstr_list *list;

	list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
	if (list == NULL)
		return NULL;
	spin_lock_init(&list->lock);
	spin_lock_init(&list->ops_lock);
	mutex_init(&list->ops_mutex);
	list->owner = -1;
	return list;
}

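/*
 * Every kinstr carries a "use" reference count: snd_seq_instr_find() takes a
 * reference under list->lock and snd_seq_instr_free_use() drops it.  The
 * teardown paths below therefore poll the counter and sleep for a tick until
 * the last user is gone before actually freeing an instrument.
 */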
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
	struct snd_seq_kinstr_list *list;
	struct snd_seq_kinstr *instr;
	struct snd_seq_kcluster *cluster;
	int idx;
	unsigned long flags;

	if (list_ptr == NULL || *list_ptr == NULL)
		return;
	list = *list_ptr;
	*list_ptr = NULL;

	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		while ((instr = list->hash[idx]) != NULL) {
			list->hash[idx] = instr->next;
			/* wait until nobody uses this instrument anymore */
			spin_lock_irqsave(&list->lock, flags);
			while (instr->use) {
				spin_unlock_irqrestore(&list->lock, flags);
				schedule_timeout_interruptible(1);
				spin_lock_irqsave(&list->lock, flags);
			}
			spin_unlock_irqrestore(&list->lock, flags);
			if (snd_seq_instr_free(instr, 0) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
		while ((cluster = list->chash[idx]) != NULL) {
			list->chash[idx] = cluster->next;
			kfree(cluster);
		}
	}
	kfree(list);
}

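/*
 * The top byte of snd_seq_instr.std encodes the owning client for private
 * instruments; a zero top byte marks a standard (shared) instrument.  The
 * put/free handlers below rewrite that byte with the requesting client, and
 * the helper here decides which instruments a free request may touch.
 */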
static int instr_free_compare(struct snd_seq_kinstr *instr,
			      struct snd_seq_instr_header *ifree,
			      unsigned int client)
{
	switch (ifree->cmd) {
	case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
		/* all, except private instruments of other clients */
		if ((instr->instr.std & 0xff000000) == 0)
			return 1;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 1;
		return 0;
	case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
		/* all my private instruments */
		if ((instr->instr.std & 0xff000000) == 0)
			return 0;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 1;
		return 0;
	case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
		/* all accessible instruments in the given cluster */
		if ((instr->instr.std & 0xff000000) == 0) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 1;
			return 0;
		}
		if (((instr->instr.std >> 24) & 0xff) == client) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 1;
		}
		return 0;
	}
	return 0;
}

int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
				 struct snd_seq_instr_header *ifree,
				 int client,
				 int atomic)
{
	struct snd_seq_kinstr *instr, *prev, *next, *flist;
	int idx;
	unsigned long flags;

	snd_instr_lock_ops(list);
	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[idx];
		prev = flist = NULL;
		while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
			next = instr->next;
			if (instr->ops && instr->ops->notify)
				instr->ops->notify(instr->ops->private_data, instr,
						   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
			if (prev == NULL)
				list->hash[idx] = next;
			else
				prev->next = next;
			/* collect on a local list, free it outside the spinlock */
			instr->next = flist;
			flist = instr;
			instr = next;
		}
		spin_unlock_irqrestore(&list->lock, flags);
		while (flist) {
			instr = flist;
			flist = instr->next;
			while (instr->use)
				schedule_timeout_interruptible(1);
			if (snd_seq_instr_free(instr, atomic) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
	}
	snd_instr_unlock_ops(list);
	return 0;
}

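/*
 * Hash keys: the instrument key mixes bank and program number and folds the
 * upper bytes down, the cluster key folds the cluster id the same way; both
 * are masked with SNDRV_SEQ_INSTR_HASH_SIZE - 1, so the hash size has to be
 * a power of two.
 */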
static int compute_hash_instr_key(struct snd_seq_instr *instr)
{
	unsigned int result;

	result = instr->bank | (instr->prg << 16);
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}

static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
	unsigned int result = cluster;

	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}

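/*
 * Instrument matching.  With exact set, cluster, bank, program and the owner
 * byte of std must all agree.  Without it, i2 acts as a search template: a
 * zero cluster or a zero owner byte is treated as a wildcard and only the
 * remaining fields are compared.  Returns 0 on a match.
 */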
static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
{
	if (exact) {
		if (i1->cluster != i2->cluster ||
		    i1->bank != i2->bank ||
		    i1->prg != i2->prg)
			return 1;
		if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
			return 1;
		if (!(i1->std & i2->std))
			return 1;
		return 0;
	} else {
		unsigned int client_check;

		if (i2->cluster && i1->cluster != i2->cluster)
			return 1;
		client_check = i2->std & 0xff000000;
		if (client_check) {
			if ((i1->std & 0xff000000) != client_check)
				return 1;
		} else if ((i1->std & i2->std) != i2->std)
			return 1;
		return i1->bank != i2->bank || i1->prg != i2->prg;
	}
}

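/*
 * Look an instrument up by id.  On success the use count of the returned
 * entry is bumped under list->lock and the caller must drop it again with
 * snd_seq_instr_free_use().  Alias entries (SNDRV_SEQ_INSTR_ATYPE_ALIAS) are
 * followed when follow_alias is set.
 */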
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
					  struct snd_seq_instr *instr,
					  int exact,
					  int follow_alias)
{
	unsigned long flags;
	int depth = 0;
	struct snd_seq_kinstr *result;

	if (list == NULL || instr == NULL)
		return NULL;
	spin_lock_irqsave(&list->lock, flags);
      __again:
	result = list->hash[compute_hash_instr_key(instr)];
	while (result) {
		if (!compare_instr(&result->instr, instr, exact)) {
			if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
				instr = (struct snd_seq_instr *)KINSTR_DATA(result);
				if (++depth > 10)	/* too many alias redirections */
					break;
				goto __again;
			}
			result->use++;
			spin_unlock_irqrestore(&list->lock, flags);
			return result;
		}
		result = result->next;
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return NULL;
}

void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
			    struct snd_seq_kinstr *instr)
{
	unsigned long flags;

	if (list == NULL || instr == NULL)
		return;
	spin_lock_irqsave(&list->lock, flags);
	if (instr->use <= 0) {
		snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n",
			   instr->use, instr->name);
	} else {
		instr->use--;
	}
	spin_unlock_irqrestore(&list->lock, flags);
}

static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
					    char *instr_type)
{
	while (ops) {
		if (!strcmp(ops->instr_type, instr_type))
			return ops;
		ops = ops->next;
	}
	return NULL;
}

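/*
 * Every instrument request is answered with a SNDRV_SEQ_EVENT_RESULT event
 * sent back to the requester: source and destination of the original event
 * are swapped and the result code is carried in data.result.
 */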
static int instr_result(struct snd_seq_event *ev,
			int type, int result,
			int atomic)
{
	struct snd_seq_event sev;

	memset(&sev, 0, sizeof(sev));
	sev.type = SNDRV_SEQ_EVENT_RESULT;
	sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
		    SNDRV_SEQ_PRIORITY_NORMAL;
	sev.source = ev->dest;
	sev.dest = ev->source;
	sev.data.result.event = type;
	sev.data.result.result = result;
#if 0	/* debugging aid */
	printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
	       type, result,
	       sev.queue,
	       sev.source.client, sev.source.port,
	       sev.dest.client, sev.dest.port);
#endif
	return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}

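/*
 * INSTR_BEGIN/INSTR_END implement a simple ownership protocol: one client at
 * a time may claim the list for a download session, other clients get -EBUSY
 * back, and INSTR_END releases the claim again.
 */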
static int instr_begin(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	if (list->owner >= 0 && list->owner != ev->source.client) {
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
	}
	list->owner = ev->source.client;
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;

	/* TODO: timeout handling */
	spin_lock_irqsave(&list->lock, flags);
	if (list->owner == ev->source.client) {
		list->owner = -1;
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}

static int instr_info(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_format_info(struct snd_seq_kinstr_ops *ops,
			     struct snd_seq_kinstr_list *list,
			     struct snd_seq_event *ev,
			     int atomic, int hop)
{
	return -ENXIO;
}

static int instr_reset(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	return -ENXIO;
}

static int instr_status(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int atomic, int hop)
{
	return -ENXIO;
}

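/*
 * INSTR_PUT carries a variable-length user-space payload: a
 * struct snd_seq_instr_header immediately followed by the raw instrument
 * data, so the handler checks for SNDRV_SEQ_EVENT_LENGTH_VARUSR, copies the
 * header with copy_from_user() and hands the rest to the matching ops->put().
 */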
static int instr_put(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;
	struct snd_seq_instr_header put;
	struct snd_seq_kinstr *instr;
	int result = -EINVAL, len, key;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	snd_instr_lock_ops(list);
	if (put.id.instr.std & 0xff000000) { /* private instrument */
		put.id.instr.std &= 0x00ffffff;
		put.id.instr.std |= (unsigned int)ev->source.client << 24;
	}
	if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
		snd_seq_instr_free_use(list, instr);
		snd_instr_unlock_ops(list);
		result = -EBUSY;
		goto __return;
	}
	ops = instr_ops(ops, put.data.data.format);
	if (ops == NULL) {
		snd_instr_unlock_ops(list);
		goto __return;
	}
	len = ops->add_len;
	if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
		len = sizeof(struct snd_seq_instr);
	instr = snd_seq_instr_new(len, atomic);
	if (instr == NULL) {
		snd_instr_unlock_ops(list);
		result = -ENOMEM;
		goto __return;
	}
	instr->ops = ops;
	instr->instr = put.id.instr;
	strlcpy(instr->name, put.data.name, sizeof(instr->name));
	instr->type = put.data.type;
	if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
		result = ops->put(ops->private_data,
				  instr,
				  (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
				  ev->data.ext.len - sizeof(struct snd_seq_instr_header),
				  atomic,
				  put.cmd);
		if (result < 0) {
			snd_seq_instr_free(instr, atomic);
			snd_instr_unlock_ops(list);
			goto __return;
		}
	}
	key = compute_hash_instr_key(&instr->instr);
	spin_lock_irqsave(&list->lock, flags);
	instr->next = list->hash[key];
	list->hash[key] = instr;
	spin_unlock_irqrestore(&list->lock, flags);
	snd_instr_unlock_ops(list);
	result = 0;
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
	return result;
}

static int instr_get(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	return -ENXIO;
}

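/*
 * INSTR_FREE supports four commands: SINGLE removes one instrument by id,
 * while ALL, PRIVATE and CLUSTER are handed to snd_seq_instr_list_free_cond(),
 * which walks the whole hash table with instr_free_compare() deciding what
 * may go.
 */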
static int instr_free(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	struct snd_seq_instr_header ifree;
	struct snd_seq_kinstr *instr, *prev;
	int result = -EINVAL;
	unsigned long flags;
	int hash;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
		result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
		if (ifree.id.instr.std & 0xff000000) {
			ifree.id.instr.std &= 0x00ffffff;
			ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
		}
		hash = compute_hash_instr_key(&ifree.id.instr);
		snd_instr_lock_ops(list);
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[hash];
		prev = NULL;
		while (instr) {
			if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
				break;
			prev = instr;
			instr = instr->next;
		}
		if (instr == NULL) {
			spin_unlock_irqrestore(&list->lock, flags);
			snd_instr_unlock_ops(list);
			result = -ENOENT;
			goto __return;
		}
		if (prev)
			prev->next = instr->next;
		else
			list->hash[hash] = instr->next;
		if (instr->ops && instr->ops->notify)
			instr->ops->notify(instr->ops->private_data, instr,
					   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
		/* wait until nobody uses this instrument anymore */
		while (instr->use) {
			spin_unlock_irqrestore(&list->lock, flags);
			schedule_timeout_interruptible(1);
			spin_lock_irqsave(&list->lock, flags);
		}
		spin_unlock_irqrestore(&list->lock, flags);
		result = snd_seq_instr_free(instr, atomic);
		snd_instr_unlock_ops(list);
	}
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
	return result;
}

static int instr_list(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_cluster(struct snd_seq_kinstr_ops *ops,
			 struct snd_seq_kinstr_list *list,
			 struct snd_seq_event *ev,
			 int atomic, int hop)
{
	return -ENXIO;
}

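/*
 * Main dispatcher of the instrument event API.  INSTR_BEGIN/INSTR_END are
 * only honoured when delivered as direct events; lists flagged
 * SNDRV_SEQ_INSTR_FLG_DIRECT reject any event that did not arrive directly.
 * Everything else is routed to the handlers above, and unknown event types
 * give -EINVAL.
 */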
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int client,
			int atomic,
			int hop)
{
	int direct = 0;

	snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
	if (snd_seq_ev_is_direct(ev)) {
		direct = 1;
		switch (ev->type) {
		case SNDRV_SEQ_EVENT_INSTR_BEGIN:
			return instr_begin(ops, list, ev, atomic, hop);
		case SNDRV_SEQ_EVENT_INSTR_END:
			return instr_end(ops, list, ev, atomic, hop);
		}
	}
	if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
		return -EINVAL;
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_INSTR_INFO:
		return instr_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FINFO:
		return instr_format_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_RESET:
		return instr_reset(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_STATUS:
		return instr_status(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_PUT:
		return instr_put(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_GET:
		return instr_get(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FREE:
		return instr_free(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_LIST:
		return instr_list(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
		return instr_cluster(ops, list, ev, atomic, hop);
	}
	return -EINVAL;
}

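/*
 * Usage sketch (illustrative only; "my_ops", "my_list" and "my_client" are
 * made-up names, not part of this API): a synth driver keeps one ops chain
 * and one list per port and forwards the SNDRV_SEQ_EVENT_INSTR_* events it
 * receives to this library, roughly like
 *
 *	my_list = snd_seq_instr_list_new();
 *	...
 *	err = snd_seq_instr_event(&my_ops, my_list, ev,
 *				  my_client, atomic, hop);
 *	...
 *	snd_seq_instr_list_free(&my_list);
 */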
static int __init alsa_seq_instr_init(void)
{
	return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);