/* seq_instr.c */
  1. /*
  2. * Generic Instrument routines for ALSA sequencer
  3. * Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. *
  19. */
  20. #include <sound/driver.h>
  21. #include <linux/init.h>
  22. #include <linux/slab.h>
  23. #include <sound/core.h>
  24. #include "seq_clientmgr.h"
  25. #include <sound/seq_instr.h>
  26. #include <sound/initval.h>
  27. MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
  28. MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
  29. MODULE_LICENSE("GPL");
  30. static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
  31. {
  32. if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
  33. spin_lock_irqsave(&list->ops_lock, list->ops_flags);
  34. } else {
  35. mutex_lock(&list->ops_mutex);
  36. }
  37. }
  38. static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
  39. {
  40. if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
  41. spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
  42. } else {
  43. mutex_unlock(&list->ops_mutex);
  44. }
  45. }
  46. static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
  47. {
  48. struct snd_seq_kinstr *instr;
  49. instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
  50. if (instr == NULL)
  51. return NULL;
  52. instr->add_len = add_len;
  53. return instr;
  54. }
  55. static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
  56. {
  57. int result = 0;
  58. if (instr == NULL)
  59. return -EINVAL;
  60. if (instr->ops && instr->ops->remove)
  61. result = instr->ops->remove(instr->ops->private_data, instr, 1);
  62. if (!result)
  63. kfree(instr);
  64. return result;
  65. }
  66. struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
  67. {
  68. struct snd_seq_kinstr_list *list;
  69. list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
  70. if (list == NULL)
  71. return NULL;
  72. spin_lock_init(&list->lock);
  73. spin_lock_init(&list->ops_lock);
  74. mutex_init(&list->ops_mutex);
  75. list->owner = -1;
  76. return list;
  77. }
/*
 * Destroy a whole instrument list: drop every instrument and cluster
 * from all hash buckets, then free the list itself.  *list_ptr is
 * cleared first so callers holding the pointer stop seeing the list.
 * Sleeps while waiting for use references, so process context only.
 */
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
	struct snd_seq_kinstr_list *list;
	struct snd_seq_kinstr *instr;
	struct snd_seq_kcluster *cluster;
	int idx;
	unsigned long flags;

	if (list_ptr == NULL)
		return;
	list = *list_ptr;
	*list_ptr = NULL;
	if (list == NULL)
		return;

	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		while ((instr = list->hash[idx]) != NULL) {
			list->hash[idx] = instr->next;
			list->count--;
			/* wait for outstanding use references to drop:
			 * poll under the lock, sleeping one tick per retry */
			spin_lock_irqsave(&list->lock, flags);
			while (instr->use) {
				spin_unlock_irqrestore(&list->lock, flags);
				schedule_timeout_uninterruptible(1);
				spin_lock_irqsave(&list->lock, flags);
			}
			spin_unlock_irqrestore(&list->lock, flags);
			if (snd_seq_instr_free(instr, 0) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
		/* clusters carry no per-type resources; plain kfree suffices */
		while ((cluster = list->chash[idx]) != NULL) {
			list->chash[idx] = cluster->next;
			list->ccount--;
			kfree(cluster);
		}
	}
	kfree(list);
}
/*
 * Decide whether @instr matches a free request.
 * Returns 0 when the instrument should be freed, 1 when it must be kept.
 * The top byte of instr->instr.std holds the owning client number;
 * zero there marks a global (non-private) instrument.
 */
static int instr_free_compare(struct snd_seq_kinstr *instr,
			      struct snd_seq_instr_header *ifree,
			      unsigned int client)
{
	switch (ifree->cmd) {
	case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
		/* all, except private instruments of other clients */
		if ((instr->instr.std & 0xff000000) == 0)
			return 0;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
		/* all my private instruments */
		if ((instr->instr.std & 0xff000000) == 0)
			return 1;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
		/* instruments in the given cluster: global ones, plus my own
		 * private ones (other clients' private instruments are kept) */
		if ((instr->instr.std & 0xff000000) == 0) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
			return 1;
		}
		if (((instr->instr.std >> 24) & 0xff) == client) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
		}
		return 1;
	}
	/* unknown command: keep the instrument */
	return 1;
}
  147. int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
  148. struct snd_seq_instr_header *ifree,
  149. int client,
  150. int atomic)
  151. {
  152. struct snd_seq_kinstr *instr, *prev, *next, *flist;
  153. int idx;
  154. unsigned long flags;
  155. snd_instr_lock_ops(list);
  156. for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
  157. spin_lock_irqsave(&list->lock, flags);
  158. instr = list->hash[idx];
  159. prev = flist = NULL;
  160. while (instr) {
  161. while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
  162. prev = instr;
  163. instr = instr->next;
  164. }
  165. if (instr == NULL)
  166. continue;
  167. if (instr->ops && instr->ops->notify)
  168. instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
  169. next = instr->next;
  170. if (prev == NULL) {
  171. list->hash[idx] = next;
  172. } else {
  173. prev->next = next;
  174. }
  175. list->count--;
  176. instr->next = flist;
  177. flist = instr;
  178. instr = next;
  179. }
  180. spin_unlock_irqrestore(&list->lock, flags);
  181. while (flist) {
  182. instr = flist;
  183. flist = instr->next;
  184. while (instr->use) {
  185. schedule_timeout_uninterruptible(1);
  186. barrier();
  187. }
  188. if (snd_seq_instr_free(instr, atomic)<0)
  189. snd_printk(KERN_WARNING "instrument free problem\n");
  190. instr = next;
  191. }
  192. }
  193. snd_instr_unlock_ops(list);
  194. return 0;
  195. }
  196. static int compute_hash_instr_key(struct snd_seq_instr *instr)
  197. {
  198. int result;
  199. result = instr->bank | (instr->prg << 16);
  200. result += result >> 24;
  201. result += result >> 16;
  202. result += result >> 8;
  203. return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
  204. }
#if 0
/* Currently unused: hash function for cluster ids, kept for reference. */
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
	int result;

	result = cluster;
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}
#endif
  216. static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
  217. {
  218. if (exact) {
  219. if (i1->cluster != i2->cluster ||
  220. i1->bank != i2->bank ||
  221. i1->prg != i2->prg)
  222. return 1;
  223. if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
  224. return 1;
  225. if (!(i1->std & i2->std))
  226. return 1;
  227. return 0;
  228. } else {
  229. unsigned int client_check;
  230. if (i2->cluster && i1->cluster != i2->cluster)
  231. return 1;
  232. client_check = i2->std & 0xff000000;
  233. if (client_check) {
  234. if ((i1->std & 0xff000000) != client_check)
  235. return 1;
  236. } else {
  237. if ((i1->std & i2->std) != i2->std)
  238. return 1;
  239. }
  240. return i1->bank != i2->bank || i1->prg != i2->prg;
  241. }
  242. }
/*
 * Look up an instrument in @list.
 *
 * On success the instrument's use count is incremented (release it with
 * snd_seq_instr_free_use()) and a pointer is returned; NULL means not
 * found.  When @follow_alias is set, alias entries are chased up to 10
 * levels deep to guard against reference loops.  The whole lookup runs
 * under list->lock, so it is safe in atomic context.
 */
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
					  struct snd_seq_instr *instr,
					  int exact,
					  int follow_alias)
{
	unsigned long flags;
	int depth = 0;
	struct snd_seq_kinstr *result;

	if (list == NULL || instr == NULL)
		return NULL;
	spin_lock_irqsave(&list->lock, flags);
      __again:
	result = list->hash[compute_hash_instr_key(instr)];
	while (result) {
		if (!compare_instr(&result->instr, instr, exact)) {
			if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
				/* alias: the target id is stored in the
				 * instrument's trailing data area */
				instr = (struct snd_seq_instr *)KINSTR_DATA(result);
				if (++depth > 10)
					goto __not_found;
				goto __again;
			}
			result->use++;
			spin_unlock_irqrestore(&list->lock, flags);
			return result;
		}
		result = result->next;
	}
      __not_found:
	spin_unlock_irqrestore(&list->lock, flags);
	return NULL;
}
  274. void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
  275. struct snd_seq_kinstr *instr)
  276. {
  277. unsigned long flags;
  278. if (list == NULL || instr == NULL)
  279. return;
  280. spin_lock_irqsave(&list->lock, flags);
  281. if (instr->use <= 0) {
  282. snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n", instr->use, instr->name);
  283. } else {
  284. instr->use--;
  285. }
  286. spin_unlock_irqrestore(&list->lock, flags);
  287. }
  288. static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
  289. char *instr_type)
  290. {
  291. while (ops) {
  292. if (!strcmp(ops->instr_type, instr_type))
  293. return ops;
  294. ops = ops->next;
  295. }
  296. return NULL;
  297. }
  298. static int instr_result(struct snd_seq_event *ev,
  299. int type, int result,
  300. int atomic)
  301. {
  302. struct snd_seq_event sev;
  303. memset(&sev, 0, sizeof(sev));
  304. sev.type = SNDRV_SEQ_EVENT_RESULT;
  305. sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
  306. SNDRV_SEQ_PRIORITY_NORMAL;
  307. sev.source = ev->dest;
  308. sev.dest = ev->source;
  309. sev.data.result.event = type;
  310. sev.data.result.result = result;
  311. #if 0
  312. printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
  313. type, result,
  314. sev.queue,
  315. sev.source.client, sev.source.port,
  316. sev.dest.client, sev.dest.port);
  317. #endif
  318. return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
  319. }
  320. static int instr_begin(struct snd_seq_kinstr_ops *ops,
  321. struct snd_seq_kinstr_list *list,
  322. struct snd_seq_event *ev,
  323. int atomic, int hop)
  324. {
  325. unsigned long flags;
  326. spin_lock_irqsave(&list->lock, flags);
  327. if (list->owner >= 0 && list->owner != ev->source.client) {
  328. spin_unlock_irqrestore(&list->lock, flags);
  329. return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
  330. }
  331. list->owner = ev->source.client;
  332. spin_unlock_irqrestore(&list->lock, flags);
  333. return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
  334. }
  335. static int instr_end(struct snd_seq_kinstr_ops *ops,
  336. struct snd_seq_kinstr_list *list,
  337. struct snd_seq_event *ev,
  338. int atomic, int hop)
  339. {
  340. unsigned long flags;
  341. /* TODO: timeout handling */
  342. spin_lock_irqsave(&list->lock, flags);
  343. if (list->owner == ev->source.client) {
  344. list->owner = -1;
  345. spin_unlock_irqrestore(&list->lock, flags);
  346. return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
  347. }
  348. spin_unlock_irqrestore(&list->lock, flags);
  349. return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
  350. }
/* SNDRV_SEQ_EVENT_INSTR_INFO handler -- not implemented. */
static int instr_info(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}
/* SNDRV_SEQ_EVENT_INSTR_FINFO handler -- not implemented. */
static int instr_format_info(struct snd_seq_kinstr_ops *ops,
			     struct snd_seq_kinstr_list *list,
			     struct snd_seq_event *ev,
			     int atomic, int hop)
{
	return -ENXIO;
}
/* SNDRV_SEQ_EVENT_INSTR_RESET handler -- not implemented. */
static int instr_reset(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	return -ENXIO;
}
/* SNDRV_SEQ_EVENT_INSTR_STATUS handler -- not implemented. */
static int instr_status(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int atomic, int hop)
{
	return -ENXIO;
}
  379. static int instr_put(struct snd_seq_kinstr_ops *ops,
  380. struct snd_seq_kinstr_list *list,
  381. struct snd_seq_event *ev,
  382. int atomic, int hop)
  383. {
  384. unsigned long flags;
  385. struct snd_seq_instr_header put;
  386. struct snd_seq_kinstr *instr;
  387. int result = -EINVAL, len, key;
  388. if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
  389. goto __return;
  390. if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
  391. goto __return;
  392. if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
  393. sizeof(struct snd_seq_instr_header))) {
  394. result = -EFAULT;
  395. goto __return;
  396. }
  397. snd_instr_lock_ops(list);
  398. if (put.id.instr.std & 0xff000000) { /* private instrument */
  399. put.id.instr.std &= 0x00ffffff;
  400. put.id.instr.std |= (unsigned int)ev->source.client << 24;
  401. }
  402. if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
  403. snd_seq_instr_free_use(list, instr);
  404. snd_instr_unlock_ops(list);
  405. result = -EBUSY;
  406. goto __return;
  407. }
  408. ops = instr_ops(ops, put.data.data.format);
  409. if (ops == NULL) {
  410. snd_instr_unlock_ops(list);
  411. goto __return;
  412. }
  413. len = ops->add_len;
  414. if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
  415. len = sizeof(struct snd_seq_instr);
  416. instr = snd_seq_instr_new(len, atomic);
  417. if (instr == NULL) {
  418. snd_instr_unlock_ops(list);
  419. result = -ENOMEM;
  420. goto __return;
  421. }
  422. instr->ops = ops;
  423. instr->instr = put.id.instr;
  424. strlcpy(instr->name, put.data.name, sizeof(instr->name));
  425. instr->type = put.data.type;
  426. if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
  427. result = ops->put(ops->private_data,
  428. instr,
  429. (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
  430. ev->data.ext.len - sizeof(struct snd_seq_instr_header),
  431. atomic,
  432. put.cmd);
  433. if (result < 0) {
  434. snd_seq_instr_free(instr, atomic);
  435. snd_instr_unlock_ops(list);
  436. goto __return;
  437. }
  438. }
  439. key = compute_hash_instr_key(&instr->instr);
  440. spin_lock_irqsave(&list->lock, flags);
  441. instr->next = list->hash[key];
  442. list->hash[key] = instr;
  443. list->count++;
  444. spin_unlock_irqrestore(&list->lock, flags);
  445. snd_instr_unlock_ops(list);
  446. result = 0;
  447. __return:
  448. instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
  449. return result;
  450. }
/* SNDRV_SEQ_EVENT_INSTR_GET handler -- not implemented. */
static int instr_get(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	return -ENXIO;
}
/*
 * SNDRV_SEQ_EVENT_INSTR_FREE: remove instruments as requested by a
 * user-space header.  Bulk commands (ALL/PRIVATE/CLUSTER) are delegated
 * to snd_seq_instr_list_free_cond(); CMD_SINGLE unlinks and frees one
 * exactly-matching instrument here.  The outcome is reported back to
 * the sender as a RESULT event and also returned.
 */
static int instr_free(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	struct snd_seq_instr_header ifree;
	struct snd_seq_kinstr *instr, *prev;
	int result = -EINVAL;
	unsigned long flags;
	unsigned int hash;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;
	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
		/* bulk removal path */
		result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
		if (ifree.id.instr.std & 0xff000000) {
			/* private id: stamp the sender's client number */
			ifree.id.instr.std &= 0x00ffffff;
			ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
		}
		hash = compute_hash_instr_key(&ifree.id.instr);
		snd_instr_lock_ops(list);
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[hash];
		prev = NULL;
		while (instr) {
			if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
				goto __free_single;
			prev = instr;
			instr = instr->next;
		}
		result = -ENOENT;
		spin_unlock_irqrestore(&list->lock, flags);
		snd_instr_unlock_ops(list);
		goto __return;
	      __free_single:
		/* unlink from the hash chain while still holding the lock */
		if (prev) {
			prev->next = instr->next;
		} else {
			list->hash[hash] = instr->next;
		}
		/* NOTE(review): notify is invoked with list->lock held
		 * (atomic context) -- confirm the callback never sleeps */
		if (instr->ops && instr->ops->notify)
			instr->ops->notify(instr->ops->private_data, instr,
					   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
		/* wait for use references, dropping the lock each retry */
		while (instr->use) {
			spin_unlock_irqrestore(&list->lock, flags);
			schedule_timeout_uninterruptible(1);
			spin_lock_irqsave(&list->lock, flags);
		}
		spin_unlock_irqrestore(&list->lock, flags);
		result = snd_seq_instr_free(instr, atomic);
		snd_instr_unlock_ops(list);
		goto __return;
	}
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
	return result;
}
/* SNDRV_SEQ_EVENT_INSTR_LIST handler -- not implemented. */
static int instr_list(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}
/* SNDRV_SEQ_EVENT_INSTR_CLUSTER handler -- not implemented. */
static int instr_cluster(struct snd_seq_kinstr_ops *ops,
			 struct snd_seq_kinstr_list *list,
			 struct snd_seq_event *ev,
			 int atomic, int hop)
{
	return -ENXIO;
}
/*
 * Dispatch an instrument-related sequencer event to its handler.
 *
 * BEGIN/END (list ownership) are accepted only as direct events.  For a
 * list flagged SNDRV_SEQ_INSTR_FLG_DIRECT, all other events must be
 * direct as well.  Unknown event types yield -EINVAL.
 *
 * NOTE(review): the @client parameter is currently unused; handlers
 * derive the client from ev->source/ev->dest instead.
 */
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int client,
			int atomic,
			int hop)
{
	int direct = 0;

	snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
	if (snd_seq_ev_is_direct(ev)) {
		direct = 1;
		switch (ev->type) {
		case SNDRV_SEQ_EVENT_INSTR_BEGIN:
			return instr_begin(ops, list, ev, atomic, hop);
		case SNDRV_SEQ_EVENT_INSTR_END:
			return instr_end(ops, list, ev, atomic, hop);
		}
	}
	if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
		return -EINVAL;
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_INSTR_INFO:
		return instr_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FINFO:
		return instr_format_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_RESET:
		return instr_reset(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_STATUS:
		return instr_status(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_PUT:
		return instr_put(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_GET:
		return instr_get(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FREE:
		return instr_free(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_LIST:
		return instr_list(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
		return instr_cluster(ops, list, ev, atomic, hop);
	}
	return -EINVAL;
}
/*
 *  Init part: this library is passive; nothing to set up or tear down.
 */

static int __init alsa_seq_instr_init(void)
{
	return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)
  594. EXPORT_SYMBOL(snd_seq_instr_list_new);
  595. EXPORT_SYMBOL(snd_seq_instr_list_free);
  596. EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
  597. EXPORT_SYMBOL(snd_seq_instr_find);
  598. EXPORT_SYMBOL(snd_seq_instr_free_use);
  599. EXPORT_SYMBOL(snd_seq_instr_event);