/*
 *  ALSA sequencer Client Manager
 *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                             Jaroslav Kysela <perex@suse.cz>
 *                             Takashi Iwai <tiwai@suse.de>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <linux/kmod.h>

#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_timer.h"
#include "seq_info.h"
#include "seq_system.h"
#include <sound/seq_device.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/* Client Manager
 * this module handles the connections of userland and kernel clients
 *
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */
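/*
 * For example (illustrative, derived from the card lookup done in
 * snd_seq_client_use_ptr() below): with the constants defined next, the
 * statically allocated client number for device 'dev'
 * (0..SNDRV_SEQ_CLIENTS_PER_CARD-1) on card 'card' is
 *
 *	clientid = SNDRV_SEQ_GLOBAL_CLIENTS + card * SNDRV_SEQ_CLIENTS_PER_CARD + dev
 */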
/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per card, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128

#define SNDRV_SEQ_LFLG_INPUT	0x0001
#define SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)

static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

/*
 * client table
 */
static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;

/*
 * prototypes
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop);

/*
 */
static inline mm_segment_t snd_enter_user(void)
{
	mm_segment_t fs = get_fs();
	set_fs(get_ds());
	return fs;
}

static inline void snd_leave_user(mm_segment_t fs)
{
	set_fs(fs);
}

/*
 */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_WRITE:
		return SNDRV_SEQ_LFLG_OUTPUT;
	case FMODE_READ:
		return SNDRV_SEQ_LFLG_INPUT;
	default:
		return SNDRV_SEQ_LFLG_OPEN;
	}
}

static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}

/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		snd_printd("Seq: oops. Trying to get pointer to client %d\n",
			   clientid);
		return NULL;
	}
	return clienttab[clientid];
}

extern int seq_client_load[];

struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	unsigned long flags;
	struct snd_seq_client *client;

	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		snd_printd("Seq: oops. Trying to get pointer to client %d\n",
			   clientid);
		return NULL;
	}
	spin_lock_irqsave(&clients_lock, flags);
	client = clientptr(clientid);
	if (client)
		goto __lock;
	if (clienttablock[clientid]) {
		spin_unlock_irqrestore(&clients_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_KMOD
	if (!in_interrupt() && current->fs->root) {
		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
		static char card_requested[SNDRV_CARDS];
		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
			int idx;

			if (! client_requested[clientid] && current->fs->root) {
				client_requested[clientid] = 1;
				for (idx = 0; idx < 15; idx++) {
					if (seq_client_load[idx] < 0)
						break;
					if (seq_client_load[idx] == clientid) {
						request_module("snd-seq-client-%i",
							       clientid);
						break;
					}
				}
			}
		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
				SNDRV_SEQ_CLIENTS_PER_CARD;
			if (card < snd_ecards_limit) {
				if (! card_requested[card]) {
					card_requested[card] = 1;
					snd_request_card(card);
				}
				snd_seq_device_load_drivers();
			}
		}
		spin_lock_irqsave(&clients_lock, flags);
		client = clientptr(clientid);
		if (client)
			goto __lock;
		spin_unlock_irqrestore(&clients_lock, flags);
	}
#endif
	return NULL;

 __lock:
	snd_use_lock_use(&client->use_lock);
	spin_unlock_irqrestore(&clients_lock, flags);
	return client;
}
static void usage_alloc(struct snd_seq_usage *res, int num)
{
	res->cur += num;
	if (res->cur > res->peak)
		res->peak = res->cur;
}

static void usage_free(struct snd_seq_usage *res, int num)
{
	res->cur -= num;
}

/* initialise data structures */
int __init client_init_data(void)
{
	/* zap out the client table */
	memset(&clienttablock, 0, sizeof(clienttablock));
	memset(&clienttab, 0, sizeof(clienttab));
	return 0;
}

static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
	unsigned long flags;
	int c;
	struct snd_seq_client *client;

	/* init client data */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return NULL;
	client->pool = snd_seq_pool_new(poolsize);
	if (client->pool == NULL) {
		kfree(client);
		return NULL;
	}
	client->type = NO_CLIENT;
	snd_use_lock_init(&client->use_lock);
	rwlock_init(&client->ports_lock);
	mutex_init(&client->ports_mutex);
	INIT_LIST_HEAD(&client->ports_list_head);

	/* find free slot in the client table */
	spin_lock_irqsave(&clients_lock, flags);
	if (client_index < 0) {
		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
		     c < SNDRV_SEQ_MAX_CLIENTS;
		     c++) {
			if (clienttab[c] || clienttablock[c])
				continue;
			clienttab[client->number = c] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	} else {
		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
			clienttab[client->number = client_index] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	}
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_pool_delete(&client->pool);
	kfree(client);
	return NULL;	/* no free slot found or busy, return failure code */
}

static int seq_free_client1(struct snd_seq_client *client)
{
	unsigned long flags;

	snd_assert(client != NULL, return -EINVAL);
	snd_seq_delete_all_ports(client);
	snd_seq_queue_client_leave(client->number);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 1;
	clienttab[client->number] = NULL;
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_use_lock_sync(&client->use_lock);
	snd_seq_queue_client_termination(client->number);
	if (client->pool)
		snd_seq_pool_delete(&client->pool);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 0;
	spin_unlock_irqrestore(&clients_lock, flags);
	return 0;
}

static void seq_free_client(struct snd_seq_client * client)
{
	mutex_lock(&register_mutex);
	switch (client->type) {
	case NO_CLIENT:
		snd_printk(KERN_WARNING "Seq: Trying to free unused client %d\n",
			   client->number);
		break;
	case USER_CLIENT:
	case KERNEL_CLIENT:
		seq_free_client1(client);
		usage_free(&client_usage, 1);
		break;
	default:
		snd_printk(KERN_ERR "Seq: Trying to free client %d with undefined type = %d\n",
			   client->number, client->type);
	}
	mutex_unlock(&register_mutex);

	snd_seq_system_client_ev_client_exit(client->number);
}
/* -------------------------------------------------------- */

/* create a user client */
static int snd_seq_open(struct inode *inode, struct file *file)
{
	int c, mode;			/* client id */
	struct snd_seq_client *client;
	struct snd_seq_user_client *user;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;	/* failure code */
	}

	mode = snd_seq_file_flags(file);
	if (mode & SNDRV_SEQ_LFLG_INPUT)
		client->accept_input = 1;
	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
		client->accept_output = 1;

	user = &client->data.user;
	user->fifo = NULL;
	user->fifo_pool_size = 0;

	if (mode & SNDRV_SEQ_LFLG_INPUT) {
		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
		if (user->fifo == NULL) {
			seq_free_client1(client);
			kfree(client);
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
	}

	usage_alloc(&client_usage, 1);
	client->type = USER_CLIENT;
	mutex_unlock(&register_mutex);

	c = client->number;
	file->private_data = client;

	/* fill client data */
	user->file = file;
	sprintf(client->name, "Client-%d", c);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(c);

	return 0;
}

/* delete a user client */
static int snd_seq_release(struct inode *inode, struct file *file)
{
	struct snd_seq_client *client = file->private_data;

	if (client) {
		seq_free_client(client);
		if (client->data.user.fifo)
			snd_seq_fifo_delete(&client->data.user.fifo);
		kfree(client);
	}

	return 0;
}

/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	not enough user-space buffer to write the whole event
 *	-EFAULT	seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* check client structures are in place */
	snd_assert(client != NULL, return -ENXIO);

	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
		return -ENXIO;

	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}

	cell = NULL;
	err = 0;
	snd_seq_fifo_lock(fifo);

	/* while data available in queue */
	while (count >= sizeof(struct snd_seq_event)) {
		int nonblock;

		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
			break;
		}
		if (snd_seq_ev_is_variable(&cell->event)) {
			struct snd_seq_event tmpev;
			tmpev = cell->event;
			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
			err = snd_seq_expand_var_event(&cell->event, count,
						       (char __force *)buf, 0,
						       sizeof(struct snd_seq_event));
			if (err < 0)
				break;
			result += err;
			count -= err;
			buf += err;
		} else {
			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
		}
		snd_seq_cell_free(cell);
		cell = NULL; /* to be sure */
		result += sizeof(struct snd_seq_event);
	}

	if (err < 0) {
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);

	return (err < 0) ? err : result;
}

/*
 * check access permission to the port
 */
static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
{
	if ((port->capability & flags) != flags)
		return 0;
	return flags;
}

/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
						    int filter)
{
	struct snd_seq_client *dest;

	dest = snd_seq_client_use_ptr(event->dest.client);
	if (dest == NULL)
		return NULL;
	if (! dest->accept_input)
		goto __not_avail;
	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
	    ! test_bit(event->type, dest->event_filter))
		goto __not_avail;
	if (filter && !(dest->filter & filter))
		goto __not_avail;

	return dest; /* ok - accessible */
__not_avail:
	snd_seq_client_unlock(dest);
	return NULL;
}

/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kmalloc.
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop)
{
	struct snd_seq_event bounce_ev;
	int result;

	if (client == NULL ||
	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
	    ! client->accept_input)
		return 0; /* ignored */

	/* set up quoted error */
	memset(&bounce_ev, 0, sizeof(bounce_ev));
	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	bounce_ev.dest.client = client->number;
	bounce_ev.dest.port = event->source.port;
	bounce_ev.data.quote.origin = event->dest;
	bounce_ev.data.quote.event = event;
	bounce_ev.data.quote.value = -err; /* use positive value */
	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
	if (result < 0) {
		client->event_lost++;
		return result;
	}

	return result;
}
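/*
 * Illustrative sketch: a kernel client receiving such a bounce in its
 * event_input callback (assuming it gets the event as 'ev') could recover
 * the original error code from the quote filled in above, e.g.
 *
 *	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
 *		err = -ev->data.quote.value;
 *
 * since quote.value carries the positive errno.
 */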
/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
				     int queue, int real_time)
{
	struct snd_seq_queue *q;

	q = queueptr(queue);
	if (! q)
		return 0;
	event->queue = queue;
	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
	if (real_time) {
		event->time.time = snd_seq_timer_get_cur_time(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
	} else {
		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
	}
	queuefree(q);
	return 1;
}

/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0  : if succeeded
 *		  <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;

	direct = snd_seq_ev_is_direct(event);

	dest = get_event_dest_client(event, filter);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;

	/* check permission */
	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}

	if (dest_port->timestamping)
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);

	switch (dest->type) {
	case USER_CLIENT:
		if (dest->data.user.fifo)
			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
		break;

	case KERNEL_CLIENT:
		if (dest_port->event_input == NULL)
			break;
		result = dest_port->event_input(event, direct,
						dest_port->private_data,
						atomic, hop);
		break;
	default:
		break;
	}

 __skip:
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);

	if (result < 0 && !direct) {
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}

/*
 * send the event to all subscribers:
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	struct snd_seq_subscribers *subs;
	int err = 0, num_ev = 0;
	struct snd_seq_event event_saved;
	struct snd_seq_client_port *src_port;
	struct list_head *p;
	struct snd_seq_port_subs_info *grp;

	src_port = snd_seq_port_use_ptr(client, event->source.port);
	if (src_port == NULL)
		return -EINVAL; /* invalid source port */
	/* save original event record */
	event_saved = *event;
	grp = &src_port->c_src;

	/* lock list */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read(&grp->list_mutex);
	list_for_each(p, &grp->list_head) {
		subs = list_entry(p, struct snd_seq_subscribers, src_list);
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert time according to flag with subscription */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event,
						   0, atomic, hop);
		if (err < 0)
			break;
		num_ev++;
		/* restore original event record */
		*event = event_saved;
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	*event = event_saved; /* restore */
	snd_seq_port_unlock(src_port);
	return (err < 0) ? err : num_ev;
}

#ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 */
static int port_broadcast_event(struct snd_seq_client *client,
				struct snd_seq_event *event,
				int atomic, int hop)
{
	int num_ev = 0, err = 0;
	struct snd_seq_client *dest_client;
	struct list_head *p;

	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
	if (dest_client == NULL)
		return 0; /* no matching destination */

	read_lock(&dest_client->ports_lock);
	list_for_each(p, &dest_client->ports_list_head) {
		struct snd_seq_client_port *port = list_entry(p, struct snd_seq_client_port, list);
		event->dest.port = port->addr.port;
		/* pass NULL as source client to avoid error bounce */
		err = snd_seq_deliver_single_event(NULL, event,
						   SNDRV_SEQ_FILTER_BROADCAST,
						   atomic, hop);
		if (err < 0)
			break;
		num_ev++;
	}
	read_unlock(&dest_client->ports_lock);
	snd_seq_client_unlock(dest_client);
	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
	return (err < 0) ? err : num_ev;
}

/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event, int atomic, int hop)
{
	int err = 0, num_ev = 0;
	int dest;
	struct snd_seq_addr addr;

	addr = event->dest; /* save */

	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
		/* don't send to itself */
		if (dest == client->number)
			continue;
		event->dest.client = dest;
		event->dest.port = addr.port;
		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
			err = port_broadcast_event(client, event, atomic, hop);
		else
			/* pass NULL as source client to avoid error bounce */
			err = snd_seq_deliver_single_event(NULL, event,
							   SNDRV_SEQ_FILTER_BROADCAST,
							   atomic, hop);
		if (err < 0)
			break;
		num_ev += err;
	}
	event->dest = addr; /* restore */
	return (err < 0) ? err : num_ev;
}

/* multicast - not supported yet */
static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
			   int atomic, int hop)
{
	snd_printd("seq: multicast not supported yet.\n");
	return 0; /* ignored */
}
#endif /* SUPPORT_BROADCAST */


/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
				 int atomic, int hop)
{
	int result;

	hop++;
	if (hop >= SNDRV_SEQ_MAX_HOPS) {
		snd_printd("too long delivery path (%d:%d->%d:%d)\n",
			   event->source.client, event->source.port,
			   event->dest.client, event->dest.port);
		return -EMLINK;
	}

	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = broadcast_event(client, event, atomic, hop);
	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		result = multicast_event(client, event, atomic, hop);
	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = port_broadcast_event(client, event, atomic, hop);
#endif
	else
		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);

	return result;
}
/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or right after an event has been enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;

	snd_assert(cell != NULL, return -EINVAL);

	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}

	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;

		/* reserve this event to enqueue note-off later */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			ev->time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms */
			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;

		/* Now queue this cell as the note off event */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */

	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}

	snd_seq_client_unlock(client);
	return result;
}
/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	} else
#ifdef SUPPORT_BROADCAST
		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
			event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
		}
#endif
	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}

	/* direct event processing without enqueueing */
	if (snd_seq_ev_is_direct(event)) {
		if (event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}

	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */

	/* allocate an event cell */
	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
	if (err < 0)
		return err;

	/* we got a cell. enqueue it. */
	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
		snd_seq_cell_free(cell);
		return err;
	}

	return 0;
}

/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
	switch (snd_seq_ev_length_type(ev)) {
	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
		if (snd_seq_ev_is_variable_type(ev))
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
		if (! snd_seq_ev_is_variable_type(ev) ||
		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
		if (! snd_seq_ev_is_instr_type(ev) ||
		    ! snd_seq_ev_is_direct(ev))
			return -EINVAL;
		break;
	}
	return 0;
}
/* handle write() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOMEM	malloc failed
 *	-EFAULT	seg. fault during copy from user space
 *	-EINVAL	invalid event
 *	-EAGAIN	no space in output pool
 *	-EINTR	interrupted while sleeping
 *	-EMLINK	too many hops
 *	others	depends on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	int written = 0, len;
	int err = -EINVAL;
	struct snd_seq_event event;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
		return -ENXIO;

	/* check client structures are in place */
	snd_assert(client != NULL, return -ENXIO);

	if (!client->accept_output || client->pool == NULL)
		return -ENXIO;

	/* allocate the pool now if the pool is not allocated yet */
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		if (snd_seq_pool_init(client->pool) < 0)
			return -ENOMEM;
	}

	/* only process whole events */
	while (count >= sizeof(struct snd_seq_event)) {
		/* Read in the event header from the user */
		len = sizeof(event);
		if (copy_from_user(&event, buf, len)) {
			err = -EFAULT;
			break;
		}
		event.source.client = client->number;	/* fill in client number */
		/* Check for extension data length */
		if (check_event_type_and_length(&event)) {
			err = -EINVAL;
			break;
		}

		/* check for special events */
		if (event.type == SNDRV_SEQ_EVENT_NONE)
			goto __skip_event;
		else if (snd_seq_ev_is_reserved(&event)) {
			err = -EINVAL;
			break;
		}

		if (snd_seq_ev_is_variable(&event)) {
			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
			if ((size_t)(extlen + len) > count) {
				/* back out, will get an error this time or next */
				err = -EINVAL;
				break;
			}
			/* set user space pointer */
			event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
			event.data.ext.ptr = (char __force *)buf
						+ sizeof(struct snd_seq_event);
			len += extlen; /* increment data length */
		} else {
#ifdef CONFIG_COMPAT
			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
				void *ptr = compat_ptr(event.data.raw32.d[1]);
				event.data.ext.ptr = ptr;
			}
#endif
		}

		/* ok, enqueue it */
		err = snd_seq_client_enqueue_event(client, &event, file,
						   !(file->f_flags & O_NONBLOCK),
						   0, 0);
		if (err < 0)
			break;

	__skip_event:
		/* Update pointers and counts */
		count -= len;
		buf += len;
		written += len;
	}

	return written ? written : err;
}


/*
 * handle polling
 */
static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
{
	struct snd_seq_client *client = file->private_data;
	unsigned int mask = 0;

	/* check client structures are in place */
	snd_assert(client != NULL, return -ENXIO);

	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
	    client->data.user.fifo) {
		/* check if data is available in the outqueue */
		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
			mask |= POLLIN | POLLRDNORM;
	}

	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
		/* check if data is available in the pool */
		if (!snd_seq_write_pool_allocated(client) ||
		    snd_seq_pool_poll_wait(client->pool, file, wait))
			mask |= POLLOUT | POLLWRNORM;
	}

	return mask;
}
/*-----------------------------------------------------*/

/* SYSTEM_INFO ioctl() */
static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_system_info info;

	memset(&info, 0, sizeof(info));
	/* fill the info fields */
	info.queues = SNDRV_SEQ_MAX_QUEUES;
	info.clients = SNDRV_SEQ_MAX_CLIENTS;
	info.ports = 256;	/* fixed limit */
	info.channels = 256;	/* fixed limit */
	info.cur_clients = client_usage.cur;
	info.cur_queues = snd_seq_queue_get_cur_queues();

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}


/* RUNNING_MODE ioctl() */
static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_running_info info;
	struct snd_seq_client *cptr;
	int err = 0;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(info.client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */

#ifdef SNDRV_BIG_ENDIAN
	if (! info.big_endian) {
		err = -EINVAL;
		goto __err;
	}
#else
	if (info.big_endian) {
		err = -EINVAL;
		goto __err;
	}
#endif
	if (info.cpu_mode > sizeof(long)) {
		err = -EINVAL;
		goto __err;
	}
	cptr->convert32 = (info.cpu_mode < sizeof(long));
 __err:
	snd_seq_client_unlock(cptr);
	return err;
}

/* CLIENT_INFO ioctl() */
static void get_client_info(struct snd_seq_client *cptr,
			    struct snd_seq_client_info *info)
{
	info->client = cptr->number;

	/* fill the info fields */
	info->type = cptr->type;
	strcpy(info->name, cptr->name);
	info->filter = cptr->filter;
	info->event_lost = cptr->event_lost;
	memcpy(info->event_filter, cptr->event_filter, 32);
	info->num_ports = cptr->num_ports;
	memset(info->reserved, 0, sizeof(info->reserved));
}

static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_info client_info;

	if (copy_from_user(&client_info, arg, sizeof(client_info)))
		return -EFAULT;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(client_info.client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */

	get_client_info(cptr, &client_info);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &client_info, sizeof(client_info)))
		return -EFAULT;
	return 0;
}


/* CLIENT_INFO ioctl() */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_info client_info;

	if (copy_from_user(&client_info, arg, sizeof(client_info)))
		return -EFAULT;

	/* it is not allowed to set the info fields for another client */
	if (client->number != client_info.client)
		return -EPERM;
	/* also client type must be set now */
	if (client->type != client_info.type)
		return -EINVAL;

	/* fill the info fields */
	if (client_info.name[0])
		strlcpy(client->name, client_info.name, sizeof(client->name));

	client->filter = client_info.filter;
	client->event_lost = client_info.event_lost;
	memcpy(client->event_filter, client_info.event_filter, 32);

	return 0;
}
/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
				     void __user *arg)
{
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;
	struct snd_seq_port_callback *callback;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* it is not allowed to create the port for another client */
	if (info.addr.client != client->number)
		return -EPERM;

	port = snd_seq_create_port(client, (info.flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info.addr.port : -1);
	if (port == NULL)
		return -ENOMEM;

	if (client->type == USER_CLIENT && info.kernel) {
		snd_seq_delete_port(client, port->addr.port);
		return -EINVAL;
	}
	if (client->type == KERNEL_CLIENT) {
		if ((callback = info.kernel) != NULL) {
			if (callback->owner)
				port->owner = callback->owner;
			port->private_data = callback->private_data;
			port->private_free = callback->private_free;
			port->callback_all = callback->callback_all;
			port->event_input = callback->event_input;
			port->c_src.open = callback->subscribe;
			port->c_src.close = callback->unsubscribe;
			port->c_dest.open = callback->use;
			port->c_dest.close = callback->unuse;
		}
	}

	info.addr = port->addr;

	snd_seq_set_port_info(port, &info);
	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/*
 * DELETE PORT ioctl()
 */
static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
				     void __user *arg)
{
	struct snd_seq_port_info info;
	int err;

	/* set passed parameters */
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* it is not allowed to remove the port for another client */
	if (info.addr.client != client->number)
		return -EPERM;

	err = snd_seq_delete_port(client, info.addr.port);
	if (err >= 0)
		snd_seq_system_client_ev_port_exit(client->number, info.addr.port);
	return err;
}


/*
 * GET_PORT_INFO ioctl() (on any client)
 */
static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	cptr = snd_seq_client_use_ptr(info.addr.client);
	if (cptr == NULL)
		return -ENXIO;

	port = snd_seq_port_use_ptr(cptr, info.addr.port);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;			/* don't change */
	}

	/* get port info */
	snd_seq_get_port_info(port, &info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}


/*
 * SET_PORT_INFO ioctl() (only ports on this/own client)
 */
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.addr.client != client->number) /* only set our own ports ! */
		return -EPERM;
	port = snd_seq_port_use_ptr(client, info.addr.port);
	if (port) {
		snd_seq_set_port_info(port, &info);
		snd_seq_port_unlock(port);
	}
	return 0;
}
/*
 * port subscription (connection)
 */
#define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
#define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)

static int check_subscription_permission(struct snd_seq_client *client,
					 struct snd_seq_client_port *sport,
					 struct snd_seq_client_port *dport,
					 struct snd_seq_port_subscribe *subs)
{
	if (client->number != subs->sender.client &&
	    client->number != subs->dest.client) {
		/* connection by third client - check export permission */
		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
	}

	/* check read permission */
	/* if sender or receiver is the subscribing client itself,
	 * no permission check is necessary
	 */
	if (client->number != subs->sender.client) {
		if (! check_port_perm(sport, PERM_RD))
			return -EPERM;
	}
	/* check write permission */
	if (client->number != subs->dest.client) {
		if (! check_port_perm(dport, PERM_WR))
			return -EPERM;
	}
	return 0;
}

/*
 * send a subscription notify event to user client:
 * client must be user client.
 */
int snd_seq_client_notify_subscription(int client, int port,
				       struct snd_seq_port_subscribe *info,
				       int evtype)
{
	struct snd_seq_event event;

	memset(&event, 0, sizeof(event));
	event.type = evtype;
	event.data.connect.dest = info->dest;
	event.data.connect.sender = info->sender;

	return snd_seq_system_notify(client, port, &event);  /* non-atomic */
}


/*
 * add to port's subscription list IOCTL interface
 */
static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
					void __user *arg)
{
	int result = -EINVAL;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;
	struct snd_seq_port_subscribe subs;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, &subs);
	if (result < 0)
		goto __end;

	/* connect them */
	result = snd_seq_port_connect(client, sender, sport, receiver, dport, &subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   &subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}


/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
					  void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;
	struct snd_seq_port_subscribe subs;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, &subs);
	if (result < 0)
		goto __end;

	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, &subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   &subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}

/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;
	int result;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
	if (result < 0)
		return result;

	q = queueptr(result);
	if (q == NULL)
		return -EINVAL;

	info.queue = q->queue;
	info.locked = q->locked;
	info.owner = q->owner;

	/* set queue name */
	if (! info.name[0])
		snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
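
/*
 * Illustrative sketch (not part of this file): from user space this ioctl is
 * normally reached through alsa-lib, but issuing it directly looks roughly
 * like the following.  The device path and the values filled into the struct
 * are assumptions made for the example only.
 *
 *	struct snd_seq_queue_info qinfo;
 *	int fd = open("/dev/snd/seq", O_RDWR);
 *
 *	memset(&qinfo, 0, sizeof(qinfo));
 *	strcpy(qinfo.name, "example queue");
 *	if (ioctl(fd, SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo) == 0)
 *		printf("queue %d owned by client %d\n", qinfo.queue, qinfo.owner);
 */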

/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	return snd_seq_queue_delete(client->number, info.queue);
}

/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = queueptr(info.queue);
	if (q == NULL)
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	strlcpy(info.name, q->name, sizeof(info.name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info.queue, client->number)) {
		if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0)
			return -EPERM;
		if (info.locked)
			snd_seq_queue_use(info.queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info.queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	return 0;
}

/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = snd_seq_queue_find_name(info.name);
	if (q == NULL)
		return -EINVAL;
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_status status;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&status, arg, sizeof(status)))
		return -EFAULT;

	queue = queueptr(status.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&status, 0, sizeof(status));
	status.queue = queue->queue;

	tmr = queue->timer;
	status.events = queue->tickq->cells + queue->timeq->cells;

	status.time = snd_seq_timer_get_cur_time(tmr);
	status.tick = snd_seq_timer_get_cur_tick(tmr);
	status.running = tmr->running;

	status.flags = queue->flags;
	queuefree(queue);

	if (copy_to_user(arg, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_tempo tempo;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	queue = queueptr(tempo.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&tempo, 0, sizeof(tempo));
	tempo.queue = queue->queue;

	tmr = queue->timer;
	tempo.tempo = tmr->tempo;
	tempo.ppq = tmr->ppq;
	tempo.skew_value = tmr->skew;
	tempo.skew_base = tmr->skew_base;
	queuefree(queue);

	if (copy_to_user(arg, &tempo, sizeof(tempo)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TEMPO ioctl() */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}

static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	int result;
	struct snd_seq_queue_tempo tempo;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	result = snd_seq_set_queue_tempo(client->number, &tempo);
	return result < 0 ? result : 0;
}

/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_timer timer;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	queue = queueptr(timer.queue);
	if (queue == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	memset(&timer, 0, sizeof(timer));
	timer.queue = queue->queue;

	timer.type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer.u.alsa.id = tmr->alsa_id;
		timer.u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	if (copy_to_user(arg, &timer, sizeof(timer)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	int result = 0;
	struct snd_seq_queue_timer timer;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	if (timer.type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer.queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer.queue);
		if (q == NULL)
			return -ENXIO;
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		snd_seq_queue_timer_close(timer.queue);
		tmr->type = timer.type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer.u.alsa.id;
			tmr->preferred_resolution = timer.u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer.queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}

/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_client info;
	int used;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	used = snd_seq_queue_is_used(info.queue, client->number);
	if (used < 0)
		return -EINVAL;
	info.used = used;
	info.client = client->number;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	int err;
	struct snd_seq_queue_client info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.used >= 0) {
		err = snd_seq_queue_use(info.queue, client->number, info.used);
		if (err < 0)
			return err;
	}

	return snd_seq_ioctl_get_queue_client(client, arg);
}

/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	struct snd_seq_client *cptr;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	cptr = snd_seq_client_use_ptr(info.client);
	if (cptr == NULL)
		return -ENOENT;
	memset(&info, 0, sizeof(info));
	info.output_pool = cptr->pool->size;
	info.output_room = cptr->pool->room;
	info.output_free = info.output_pool;
	info.output_free = snd_seq_unused_cells(cptr->pool);
	if (cptr->type == USER_CLIENT) {
		info.input_pool = cptr->data.user.fifo_pool_size;
		info.input_free = info.input_pool;
		if (cptr->data.user.fifo)
			info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
	} else {
		info.input_pool = 0;
		info.input_free = 0;
	}
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	int rc;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (client->number != info.client)
		return -EINVAL; /* can't change other clients */

	if (info.output_pool >= 1 && info.output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info.output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* remove all existing cells */
			snd_seq_queue_client_leave_cells(client->number);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info.output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info.input_pool >= 1 &&
	    info.input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info.input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info.input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info.input_pool;
	}
	if (info.output_room >= 1 &&
	    info.output_room <= client->pool->size) {
		client->pool->room = info.output_room;
	}

	return snd_seq_ioctl_get_client_pool(client, arg);
}

/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_remove_events info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions so for a user client we can clear
		 * the whole fifo
		 */
		if (client->type == USER_CLIENT)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, &info);

	return 0;
}

/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void __user *arg)
{
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_port_subscribe subs;
	struct snd_seq_subscribers *p;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	p = snd_seq_port_get_subscription(&sport->c_src, &subs.dest);
	if (p) {
		result = 0;
		subs = p->info;
	} else
		result = -ENOENT;

      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}

/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
				    void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_query_subs subs;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((cptr = snd_seq_client_use_ptr(subs.root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs.root.port)) == NULL)
		goto __end;

	switch (subs.type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs.num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs.index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs.type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs.addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs.addr = s->info.sender;
			}
			subs.flags = s->info.flags;
			subs.queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}

/*
 * query next client
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void __user *arg)
{
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* search for next client */
	info.client++;
	if (info.client < 0)
		info.client = 0;
	for (; info.client < SNDRV_SEQ_MAX_CLIENTS; info.client++) {
		cptr = snd_seq_client_use_ptr(info.client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, &info);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * query next port
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	cptr = snd_seq_client_use_ptr(info.addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info.addr.port++;
	port = snd_seq_port_query_nearest(cptr, &info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info.addr = port->addr;
	snd_seq_get_port_info(port, &info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* -------------------------------------------------------- */

static struct seq_ioctl_table {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void __user * arg);
} ioctl_tables[] = {
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
			    void __user *arg)
{
	struct seq_ioctl_table *p;

	switch (cmd) {
	case SNDRV_SEQ_IOCTL_PVERSION:
		/* return sequencer version number */
		return put_user(SNDRV_SEQ_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_SEQ_IOCTL_CLIENT_ID:
		/* return the id of this client */
		return put_user(client->number, (int __user *)arg) ? -EFAULT : 0;
	}

	if (! arg)
		return -EFAULT;
	for (p = ioctl_tables; p->cmd; p++) {
		if (p->cmd == cmd)
			return p->func(client, arg);
	}
	snd_printd("seq unknown ioctl() 0x%x (type='%c', number=0x%2x)\n",
		   cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}

static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;

	snd_assert(client != NULL, return -ENXIO);

	return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */

/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	snd_assert(! in_interrupt(), return -EBUSY);

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return client number to caller */
	return client->number;
}
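
/*
 * Usage sketch (illustrative only; "card" stands for the caller's own
 * struct snd_card and the client name is made up): a kernel driver
 * typically registers its sequencer client once at probe time and keeps
 * the returned client number for later enqueue/dispatch calls.
 *
 *	int my_client;
 *
 *	my_client = snd_seq_create_kernel_client(card, 0, "Example Synth %i",
 *						 card->number);
 *	if (my_client < 0)
 *		return my_client;	(error code from the call above)
 */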

/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	snd_assert(! in_interrupt(), return -EBUSY);

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}

/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
 * and snd_seq_kernel_client_enqueue_blocking
 */
static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
				 struct file *file, int blocking,
				 int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	snd_assert(ev != NULL, return -EINVAL);

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (! cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}

/*
 * exported, called by kernel clients to enqueue events (w/o blocking)
 *
 * RETURN VALUE: zero if succeed, negative if error
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
				  int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}

/*
 * exported, called by kernel clients to enqueue events (with blocking)
 *
 * RETURN VALUE: zero if succeed, negative if error
 */
int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
					   struct file *file,
					   int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}
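
/*
 * Usage sketch (illustrative only; "my_client" is the number returned by
 * snd_seq_create_kernel_client() above and the note data is made up):
 * a kernel client queues a note-on to its subscribers without blocking
 * roughly as follows.
 *
 *	struct snd_seq_event ev;
 *	int err;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
 *	ev.source.port = 0;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	ev.dest.port = SNDRV_SEQ_ADDRESS_UNKNOWN;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	err = snd_seq_kernel_client_enqueue(my_client, &ev, 0, 0);
 *
 * The trailing arguments are atomic and hop; both are zero when called
 * from normal process context with no hop count.
 */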

/* 
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues.  Event time-stamp will be updated.
 *
 * RETURN VALUE: negative = delivery failed,
 *		 zero, or positive: the number of delivered events
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	snd_assert(ev != NULL, return -EINVAL);

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}
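
/*
 * Usage sketch (illustrative only; the destination address and data are
 * made up): direct dispatch bypasses the event queues, as the comment
 * above notes, so it suits events that must reach the destination
 * immediately.
 *
 *	struct snd_seq_event ev;
 *	int err;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_PGMCHANGE;
 *	ev.source.port = 0;
 *	ev.dest.client = 64;	(hypothetical destination client)
 *	ev.dest.port = 0;
 *	ev.data.control.channel = 0;
 *	ev.data.control.value = 5;
 *	err = snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);
 *
 * ev.queue need not be set by the caller; the function forces it to
 * SNDRV_SEQ_QUEUE_DIRECT before delivery.
 */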

/*
 * exported, called by kernel clients to perform same functions as with
 * userland ioctl()
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	struct snd_seq_client *client;
	mm_segment_t fs;
	int result;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;
	fs = snd_enter_user();
	result = snd_seq_do_ioctl(client, cmd, (void __user *)arg);
	snd_leave_user(fs);
	return result;
}
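
/*
 * Usage sketch (illustrative only; the port name and capability bits are
 * made up): kernel clients reuse the ioctl handlers above through this
 * helper, e.g. to create a readable port for the client registered earlier.
 *
 *	struct snd_seq_port_info pinfo;
 *	int err;
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strcpy(pinfo.name, "Example Port");
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC;
 *	err = snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo);
 */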

/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_PROC_FS
/*
 *  /proc interface
 */
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}

#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')
#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct list_head *l;

	mutex_lock(&client->ports_mutex);
	list_for_each(l, &client->ports_list_head) {
		struct snd_seq_client_port *p = list_entry(l, struct snd_seq_client_port, list);
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}

void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space);

/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
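
/*
 * For reference, the dump helpers above produce output of roughly this
 * shape (the client/port numbers, names and flag strings are hypothetical
 * examples derived from the format strings, not captured output):
 *
 *	Client info
 *	  cur  clients : 3
 *	  peak clients : 4
 *	  max  clients : 192
 *
 *	Client   0 : "System" [Kernel]
 *	  Port   0 : "Timer" (Rwe-)
 *	  Port   1 : "Announce" (R-e-)
 *	    Connecting To: 15:0
 */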

#endif /* CONFIG_PROC_FS */

/*---------------------------------------------------------------------------*/

/*
 *  REGISTRATION PART
 */

static struct file_operations snd_seq_f_ops =
{
	.owner = THIS_MODULE,
	.read = snd_seq_read,
	.write = snd_seq_write,
	.open = snd_seq_open,
	.release = snd_seq_release,
	.poll = snd_seq_poll,
	.unlocked_ioctl = snd_seq_ioctl,
	.compat_ioctl = snd_seq_ioctl_compat,
};

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if ((err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				       &snd_seq_f_ops, NULL, "seq")) < 0) {
		mutex_unlock(&register_mutex);
		return err;
	}

	mutex_unlock(&register_mutex);

	return 0;
}

/*
 * unregister sequencer device
 */
void __exit snd_sequencer_device_done(void)
{
	snd_unregister_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0);
}