/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");

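/*
 * Lock/unlock the per-list ops lock: an IRQ-safe spinlock for lists
 * without the DIRECT flag, the ops semaphore (which may sleep) otherwise.
 */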
static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_lock_irqsave(&list->ops_lock, list->ops_flags);
        } else {
                down(&list->ops_mutex);
        }
}

static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
        } else {
                up(&list->ops_mutex);
        }
}

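/* Allocate a kernel instrument with 'add_len' bytes of type-specific data appended. */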
static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
{
        struct snd_seq_kinstr *instr;

        instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len,
                        atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (instr == NULL)
                return NULL;
        instr->add_len = add_len;
        return instr;
}

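/*
 * Free an instrument, letting the type-specific remove callback run first;
 * a non-zero return from the callback keeps the memory and is passed back.
 */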
static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
{
        int result = 0;

        if (instr == NULL)
                return -EINVAL;
        if (instr->ops && instr->ops->remove)
                result = instr->ops->remove(instr->ops->private_data, instr, 1);
        if (!result)
                kfree(instr);
        return result;
}

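/* Create an empty instrument list with its locks initialized and no owner. */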
struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
{
        struct snd_seq_kinstr_list *list;

        list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
        if (list == NULL)
                return NULL;
        spin_lock_init(&list->lock);
        spin_lock_init(&list->ops_lock);
        init_MUTEX(&list->ops_mutex);
        list->owner = -1;
        return list;
}

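/*
 * Free a whole instrument list: drop every hashed instrument (waiting for
 * pending users to release it) and every cluster, then the list itself.
 */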
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
        struct snd_seq_kinstr_list *list;
        struct snd_seq_kinstr *instr;
        struct snd_seq_kcluster *cluster;
        int idx;
        unsigned long flags;

        if (list_ptr == NULL)
                return;
        list = *list_ptr;
        *list_ptr = NULL;
        if (list == NULL)
                return;

        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                while ((instr = list->hash[idx]) != NULL) {
                        list->hash[idx] = instr->next;
                        list->count--;
                        spin_lock_irqsave(&list->lock, flags);
                        while (instr->use) {
                                spin_unlock_irqrestore(&list->lock, flags);
                                schedule_timeout_interruptible(1);
                                spin_lock_irqsave(&list->lock, flags);
                        }
                        spin_unlock_irqrestore(&list->lock, flags);
                        if (snd_seq_instr_free(instr, 0) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                }
                while ((cluster = list->chash[idx]) != NULL) {
                        list->chash[idx] = cluster->next;
                        list->ccount--;
                        kfree(cluster);
                }
        }
        kfree(list);
}

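/*
 * Decide whether 'instr' is hit by a free request: return 0 to free it,
 * 1 to keep it.  The top byte of instr.std carries the owning client for
 * private instruments.
 */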
static int instr_free_compare(struct snd_seq_kinstr *instr,
                              struct snd_seq_instr_header *ifree,
                              unsigned int client)
{
        switch (ifree->cmd) {
        case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
                /* all, except private instruments of other clients */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 0;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
                /* all my private instruments */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 1;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
                /* all instruments in the given cluster (global or my private ones) */
                if ((instr->instr.std & 0xff000000) == 0) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                        return 1;
                }
                if (((instr->instr.std >> 24) & 0xff) == client) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                }
                return 1;
        }
        return 1;
}

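/*
 * Conditionally free instruments selected by 'ifree' (all / private /
 * cluster).  Matching entries are unhashed under the list lock, collected
 * on a private list, and freed once their use counts drop to zero.
 */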
int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
                                 struct snd_seq_instr_header *ifree,
                                 int client,
                                 int atomic)
{
        struct snd_seq_kinstr *instr, *prev, *next, *flist;
        int idx;
        unsigned long flags;

        snd_instr_lock_ops(list);
        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[idx];
                prev = flist = NULL;
                while (instr) {
                        while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
                                prev = instr;
                                instr = instr->next;
                        }
                        if (instr == NULL)
                                continue;
                        if (instr->ops && instr->ops->notify)
                                instr->ops->notify(instr->ops->private_data, instr,
                                                   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                        next = instr->next;
                        if (prev == NULL) {
                                list->hash[idx] = next;
                        } else {
                                prev->next = next;
                        }
                        list->count--;
                        instr->next = flist;
                        flist = instr;
                        instr = next;
                }
                spin_unlock_irqrestore(&list->lock, flags);
                /* free the collected instruments outside the spinlock */
                while (flist) {
                        instr = flist;
                        flist = instr->next;
                        while (instr->use)
                                schedule_timeout_interruptible(1);
                        if (snd_seq_instr_free(instr, atomic) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                }
        }
        snd_instr_unlock_ops(list);
        return 0;
}

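/* Hash an instrument ID (bank + program) into the instrument hash table. */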
static int compute_hash_instr_key(struct snd_seq_instr *instr)
{
        int result;

        result = instr->bank | (instr->prg << 16);
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}

#if 0
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
        int result;

        result = cluster;
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}
#endif

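/*
 * Compare two instrument IDs; return 0 on match.  With 'exact' the cluster,
 * bank, program and owning client byte must all match; otherwise a zero
 * cluster or client byte in 'i2' acts as a wildcard.
 */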
static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
{
        if (exact) {
                if (i1->cluster != i2->cluster ||
                    i1->bank != i2->bank ||
                    i1->prg != i2->prg)
                        return 1;
                if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
                        return 1;
                if (!(i1->std & i2->std))
                        return 1;
                return 0;
        } else {
                unsigned int client_check;

                if (i2->cluster && i1->cluster != i2->cluster)
                        return 1;
                client_check = i2->std & 0xff000000;
                if (client_check) {
                        if ((i1->std & 0xff000000) != client_check)
                                return 1;
                } else {
                        if ((i1->std & i2->std) != i2->std)
                                return 1;
                }
                return i1->bank != i2->bank || i1->prg != i2->prg;
        }
}

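/*
 * Look up an instrument in the hash table, optionally following alias
 * entries (at most 10 levels).  On success the use count is incremented;
 * the caller must drop it with snd_seq_instr_free_use().
 */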
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
                                          struct snd_seq_instr *instr,
                                          int exact,
                                          int follow_alias)
{
        unsigned long flags;
        int depth = 0;
        struct snd_seq_kinstr *result;

        if (list == NULL || instr == NULL)
                return NULL;
        spin_lock_irqsave(&list->lock, flags);
      __again:
        result = list->hash[compute_hash_instr_key(instr)];
        while (result) {
                if (!compare_instr(&result->instr, instr, exact)) {
                        if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
                                instr = (struct snd_seq_instr *)KINSTR_DATA(result);
                                if (++depth > 10)
                                        goto __not_found;
                                goto __again;
                        }
                        result->use++;
                        spin_unlock_irqrestore(&list->lock, flags);
                        return result;
                }
                result = result->next;
        }
      __not_found:
        spin_unlock_irqrestore(&list->lock, flags);
        return NULL;
}

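/* Drop a reference obtained from snd_seq_instr_find(). */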
void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
                            struct snd_seq_kinstr *instr)
{
        unsigned long flags;

        if (list == NULL || instr == NULL)
                return;
        spin_lock_irqsave(&list->lock, flags);
        if (instr->use <= 0) {
                snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n",
                           instr->use, instr->name);
        } else {
                instr->use--;
        }
        spin_unlock_irqrestore(&list->lock, flags);
}

static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
                                            char *instr_type)
{
        while (ops) {
                if (!strcmp(ops->instr_type, instr_type))
                        return ops;
                ops = ops->next;
        }
        return NULL;
}

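/*
 * Send a RESULT event back to the source of 'ev', carrying the handled
 * event type and its error code.
 */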
static int instr_result(struct snd_seq_event *ev,
                        int type, int result,
                        int atomic)
{
        struct snd_seq_event sev;

        memset(&sev, 0, sizeof(sev));
        sev.type = SNDRV_SEQ_EVENT_RESULT;
        sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
                    SNDRV_SEQ_PRIORITY_NORMAL;
        sev.source = ev->dest;
        sev.dest = ev->source;
        sev.data.result.event = type;
        sev.data.result.result = result;
#if 0
        printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
                                type, result,
                                sev.queue,
                                sev.source.client, sev.source.port,
                                sev.dest.client, sev.dest.port);
#endif
        return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}

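/*
 * INSTR_BEGIN/INSTR_END claim and release exclusive ownership of the list
 * for one client; other clients get -EBUSY until the owner sends END.
 */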
static int instr_begin(struct snd_seq_kinstr_ops *ops,
                       struct snd_seq_kinstr_list *list,
                       struct snd_seq_event *ev,
                       int atomic, int hop)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        if (list->owner >= 0 && list->owner != ev->source.client) {
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
        }
        list->owner = ev->source.client;
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        unsigned long flags;

        /* TODO: timeout handling */
        spin_lock_irqsave(&list->lock, flags);
        if (list->owner == ev->source.client) {
                list->owner = -1;
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
        }
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}

static int instr_info(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_format_info(struct snd_seq_kinstr_ops *ops,
                             struct snd_seq_kinstr_list *list,
                             struct snd_seq_event *ev,
                             int atomic, int hop)
{
        return -ENXIO;
}

static int instr_reset(struct snd_seq_kinstr_ops *ops,
                       struct snd_seq_kinstr_list *list,
                       struct snd_seq_event *ev,
                       int atomic, int hop)
{
        return -ENXIO;
}

static int instr_status(struct snd_seq_kinstr_ops *ops,
                        struct snd_seq_kinstr_list *list,
                        struct snd_seq_event *ev,
                        int atomic, int hop)
{
        return -ENXIO;
}

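/*
 * INSTR_PUT: copy an instrument header from user space, look up the ops
 * for its data format, allocate the kernel instrument and hash it into
 * the list.  Fails with -EBUSY if the instrument already exists.
 */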
static int instr_put(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_instr_header put;
        struct snd_seq_kinstr *instr;
        int result = -EINVAL, len, key;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
                goto __return;
        if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
                           sizeof(struct snd_seq_instr_header))) {
                result = -EFAULT;
                goto __return;
        }
        snd_instr_lock_ops(list);
        if (put.id.instr.std & 0xff000000) {    /* private instrument */
                put.id.instr.std &= 0x00ffffff;
                put.id.instr.std |= (unsigned int)ev->source.client << 24;
        }
        if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
                snd_seq_instr_free_use(list, instr);
                snd_instr_unlock_ops(list);
                result = -EBUSY;
                goto __return;
        }
        ops = instr_ops(ops, put.data.data.format);
        if (ops == NULL) {
                snd_instr_unlock_ops(list);
                goto __return;
        }
        len = ops->add_len;
        if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
                len = sizeof(struct snd_seq_instr);
        instr = snd_seq_instr_new(len, atomic);
        if (instr == NULL) {
                snd_instr_unlock_ops(list);
                result = -ENOMEM;
                goto __return;
        }
        instr->ops = ops;
        instr->instr = put.id.instr;
        strlcpy(instr->name, put.data.name, sizeof(instr->name));
        instr->type = put.data.type;
        if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
                result = ops->put(ops->private_data,
                                  instr,
                                  (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
                                  ev->data.ext.len - sizeof(struct snd_seq_instr_header),
                                  atomic,
                                  put.cmd);
                if (result < 0) {
                        snd_seq_instr_free(instr, atomic);
                        snd_instr_unlock_ops(list);
                        goto __return;
                }
        }
        key = compute_hash_instr_key(&instr->instr);
        spin_lock_irqsave(&list->lock, flags);
        instr->next = list->hash[key];
        list->hash[key] = instr;
        list->count++;
        spin_unlock_irqrestore(&list->lock, flags);
        snd_instr_unlock_ops(list);
        result = 0;
      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
        return result;
}

static int instr_get(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        return -ENXIO;
}

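/*
 * INSTR_FREE: remove instruments according to the command in the header --
 * everything, the client's private ones, one cluster, or a single entry.
 */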
static int instr_free(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        struct snd_seq_instr_header ifree;
        struct snd_seq_kinstr *instr, *prev;
        int result = -EINVAL;
        unsigned long flags;
        unsigned int hash;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
                goto __return;
        if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
                           sizeof(struct snd_seq_instr_header))) {
                result = -EFAULT;
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
                result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
                if (ifree.id.instr.std & 0xff000000) {
                        ifree.id.instr.std &= 0x00ffffff;
                        ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
                }
                hash = compute_hash_instr_key(&ifree.id.instr);
                snd_instr_lock_ops(list);
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[hash];
                prev = NULL;
                while (instr) {
                        if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
                                goto __free_single;
                        prev = instr;
                        instr = instr->next;
                }
                result = -ENOENT;
                spin_unlock_irqrestore(&list->lock, flags);
                snd_instr_unlock_ops(list);
                goto __return;

              __free_single:
                if (prev) {
                        prev->next = instr->next;
                } else {
                        list->hash[hash] = instr->next;
                }
                if (instr->ops && instr->ops->notify)
                        instr->ops->notify(instr->ops->private_data, instr,
                                           SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                while (instr->use) {
                        spin_unlock_irqrestore(&list->lock, flags);
                        schedule_timeout_interruptible(1);
                        spin_lock_irqsave(&list->lock, flags);
                }
                spin_unlock_irqrestore(&list->lock, flags);
                result = snd_seq_instr_free(instr, atomic);
                snd_instr_unlock_ops(list);
                goto __return;
        }

      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
        return result;
}

static int instr_list(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_cluster(struct snd_seq_kinstr_ops *ops,
                         struct snd_seq_kinstr_list *list,
                         struct snd_seq_event *ev,
                         int atomic, int hop)
{
        return -ENXIO;
}

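/*
 * Main dispatcher: route an incoming sequencer instrument event to the
 * handlers above.  BEGIN/END are accepted only as direct events; for lists
 * with the DIRECT flag all other events must be direct as well.
 */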
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
                        struct snd_seq_kinstr_list *list,
                        struct snd_seq_event *ev,
                        int client,
                        int atomic,
                        int hop)
{
        int direct = 0;

        snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
        if (snd_seq_ev_is_direct(ev)) {
                direct = 1;
                switch (ev->type) {
                case SNDRV_SEQ_EVENT_INSTR_BEGIN:
                        return instr_begin(ops, list, ev, atomic, hop);
                case SNDRV_SEQ_EVENT_INSTR_END:
                        return instr_end(ops, list, ev, atomic, hop);
                }
        }
        if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
                return -EINVAL;
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_INSTR_INFO:
                return instr_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FINFO:
                return instr_format_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_RESET:
                return instr_reset(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_STATUS:
                return instr_status(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_PUT:
                return instr_put(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_GET:
                return instr_get(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FREE:
                return instr_free(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_LIST:
                return instr_list(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
                return instr_cluster(ops, list, ev, atomic, hop);
        }
        return -EINVAL;
}

/*
 *  Init part
 */

static int __init alsa_seq_instr_init(void)
{
        return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);