seq_memory.c

/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable-length event:
 * An event such as sysex uses the variable-length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      data.ext.len = length
 *      data.ext.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until it is expanded.
 *      data.ext.len = length | SNDRV_SEQ_EXT_USRPTR
 *      data.ext.ptr = userspace pointer
 * 3) chained cells
 *    When the variable-length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      data.ext.len = length | SNDRV_SEQ_EXT_CHAINED
 *      data.ext.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ...
 */
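
/*
 * Illustrative sketch, not part of the original file: how a reader of a
 * variable-length event can tell the three encodings above apart.  The flag
 * bits are the same ones tested by snd_seq_dump_var_event() below; the
 * helper name is made up for the example.
 */
#if 0
static void example_classify_ext(const struct snd_seq_event *ev)
{
	unsigned int length = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK;

	if (ev->data.ext.len & SNDRV_SEQ_EXT_USRPTR)
		snd_printd("user-space buffer, %u bytes\n", length);	/* case 2 */
	else if (ev->data.ext.len & SNDRV_SEQ_EXT_CHAINED)
		snd_printd("chained cells, %u bytes\n", length);	/* case 3 */
	else
		snd_printd("kernel buffer, %u bytes\n", length);	/* case 1 */
}
#endif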

/*
 * exported:
 *  call the dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);
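
/*
 * Illustrative sketch, not from the original file: a minimal in-kernel user
 * of snd_seq_dump_var_event().  The callback is handed successive chunks of
 * the expanded data; both names below are made up for the example.
 */
#if 0
static int example_dump_chunk(void *private_data, void *buf, int count)
{
	/* e.g. feed each chunk into a device FIFO; return < 0 to abort the dump */
	return 0;
}

/* caller side:
 *	err = snd_seq_dump_var_event(event, example_dump_chunk, my_device);
 */
#endif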

/*
 * exported:
 *  expand the variable-length event to a linear buffer.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);
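
/*
 * Illustrative sketch, not from the original file: expanding a variable-
 * length event into a linear kernel buffer with snd_seq_expand_var_event().
 * "ev" and "databuf" are made-up names; in_kernel = 1 and size_aligned = 0
 * request a plain, unpadded copy into kernel memory.
 */
#if 0
	char databuf[256];
	int n;

	n = snd_seq_expand_var_event(ev, sizeof(databuf), databuf, 1, 0);
	if (n < 0)
		return n;	/* -EINVAL, -EFAULT, or -EAGAIN (buffer too small) */
	/* databuf now holds n bytes of the event's external data */
#endif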

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	snd_assert(cell != NULL, return);
	pool = cell->pool;
	snd_assert(pool != NULL, return);

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}

/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed into additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
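
/*
 * Worked example, not from the original file: assuming the usual 28-byte
 * struct snd_seq_event, a variable-length event carrying 100 bytes of
 * external data is decomposed into ncells = (100 + 28 - 1) / 28 = 4 chained
 * cells, plus the head cell allocated above, i.e. 5 pool elements in total.
 * The exact event size is an assumption here; the formula is the one used
 * in snd_seq_event_dup().
 */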

/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}
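
/*
 * Illustrative sketch, not from the original file: how a client's poll
 * handler can use snd_seq_pool_poll_wait() to report that the output pool
 * has room again.  "example_poll" and "client" are made-up names.
 */
#if 0
static unsigned int example_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		mask |= POLLOUT | POLLWRNORM;	/* room for more output events */
	return mask;
}
#endif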

/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	snd_assert(pool != NULL, return -EINVAL);
	if (pool->ptr)			/* should be atomic? */
		return 0;

	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (pool->ptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	snd_assert(pool != NULL, return -EINVAL);

	/* mark the pool closing and wait for all in-use cells to be released */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);
	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n",
				   atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}

/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}
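
/*
 * Illustrative sketch, not from the original file: the typical life cycle of
 * a pool as exercised by the functions in this file.  Error handling and the
 * surrounding client structure are omitted; "events" and "ev" are made-up
 * names for the example.
 */
#if 0
	struct snd_seq_pool *pool;
	struct snd_seq_event_cell *cell;
	int events = 500;

	pool = snd_seq_pool_new(events);	/* allocate the control block */
	snd_seq_pool_init(pool);		/* vmalloc the cell array */

	/* duplicate an event into pool cells, blocking until space is free */
	snd_seq_event_dup(pool, &ev, &cell, 0, NULL);
	/* ... enqueue the cell; it is later released with snd_seq_cell_free(cell) */

	/* waits for remaining cells to be released, then frees the pool */
	snd_seq_pool_delete(&pool);
#endif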

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}