  1. /*
  2. * ALSA sequencer FIFO
  3. * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
  4. *
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. */
  21. #include <sound/core.h>
  22. #include <linux/slab.h>
  23. #include "seq_fifo.h"
  24. #include "seq_lock.h"
  25. /* FIFO */
  26. /* create new fifo */
  27. struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
  28. {
  29. struct snd_seq_fifo *f;
  30. f = kzalloc(sizeof(*f), GFP_KERNEL);
  31. if (f == NULL) {
  32. snd_printd("malloc failed for snd_seq_fifo_new() \n");
  33. return NULL;
  34. }
  35. f->pool = snd_seq_pool_new(poolsize);
  36. if (f->pool == NULL) {
  37. kfree(f);
  38. return NULL;
  39. }
  40. if (snd_seq_pool_init(f->pool) < 0) {
  41. snd_seq_pool_delete(&f->pool);
  42. kfree(f);
  43. return NULL;
  44. }
  45. spin_lock_init(&f->lock);
  46. snd_use_lock_init(&f->use_lock);
  47. init_waitqueue_head(&f->input_sleep);
  48. atomic_set(&f->overflow, 0);
  49. f->head = NULL;
  50. f->tail = NULL;
  51. f->cells = 0;
  52. return f;
  53. }
  54. void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
  55. {
  56. struct snd_seq_fifo *f;
  57. snd_assert(fifo != NULL, return);
  58. f = *fifo;
  59. snd_assert(f != NULL, return);
  60. *fifo = NULL;
  61. snd_seq_fifo_clear(f);
  62. /* wake up clients if any */
  63. if (waitqueue_active(&f->input_sleep))
  64. wake_up(&f->input_sleep);
  65. /* release resources...*/
  66. /*....................*/
  67. if (f->pool) {
  68. snd_seq_pool_done(f->pool);
  69. snd_seq_pool_delete(&f->pool);
  70. }
  71. kfree(f);
  72. }
  73. static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);
  74. /* clear queue */
  75. void snd_seq_fifo_clear(struct snd_seq_fifo *f)
  76. {
  77. struct snd_seq_event_cell *cell;
  78. unsigned long flags;
  79. /* clear overflow flag */
  80. atomic_set(&f->overflow, 0);
  81. snd_use_lock_sync(&f->use_lock);
  82. spin_lock_irqsave(&f->lock, flags);
  83. /* drain the fifo */
  84. while ((cell = fifo_cell_out(f)) != NULL) {
  85. snd_seq_cell_free(cell);
  86. }
  87. spin_unlock_irqrestore(&f->lock, flags);
  88. }
  89. /* enqueue event to fifo */
  90. int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
  91. struct snd_seq_event *event)
  92. {
  93. struct snd_seq_event_cell *cell;
  94. unsigned long flags;
  95. int err;
  96. snd_assert(f != NULL, return -EINVAL);
  97. snd_use_lock_use(&f->use_lock);
  98. err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
  99. if (err < 0) {
  100. if (err == -ENOMEM)
  101. atomic_inc(&f->overflow);
  102. snd_use_lock_free(&f->use_lock);
  103. return err;
  104. }
  105. /* append new cells to fifo */
  106. spin_lock_irqsave(&f->lock, flags);
  107. if (f->tail != NULL)
  108. f->tail->next = cell;
  109. f->tail = cell;
  110. if (f->head == NULL)
  111. f->head = cell;
  112. f->cells++;
  113. spin_unlock_irqrestore(&f->lock, flags);
  114. /* wakeup client */
  115. if (waitqueue_active(&f->input_sleep))
  116. wake_up(&f->input_sleep);
  117. snd_use_lock_free(&f->use_lock);
  118. return 0; /* success */
  119. }
  120. /* dequeue cell from fifo */
  121. static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
  122. {
  123. struct snd_seq_event_cell *cell;
  124. if ((cell = f->head) != NULL) {
  125. f->head = cell->next;
  126. /* reset tail if this was the last element */
  127. if (f->tail == cell)
  128. f->tail = NULL;
  129. cell->next = NULL;
  130. f->cells--;
  131. }
  132. return cell;
  133. }
  134. /* dequeue cell from fifo and copy on user space */
  135. int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
  136. struct snd_seq_event_cell **cellp, int nonblock)
  137. {
  138. struct snd_seq_event_cell *cell;
  139. unsigned long flags;
  140. wait_queue_t wait;
  141. snd_assert(f != NULL, return -EINVAL);
  142. *cellp = NULL;
  143. init_waitqueue_entry(&wait, current);
  144. spin_lock_irqsave(&f->lock, flags);
  145. while ((cell = fifo_cell_out(f)) == NULL) {
  146. if (nonblock) {
  147. /* non-blocking - return immediately */
  148. spin_unlock_irqrestore(&f->lock, flags);
  149. return -EAGAIN;
  150. }
  151. set_current_state(TASK_INTERRUPTIBLE);
  152. add_wait_queue(&f->input_sleep, &wait);
  153. spin_unlock_irq(&f->lock);
  154. schedule();
  155. spin_lock_irq(&f->lock);
  156. remove_wait_queue(&f->input_sleep, &wait);
  157. if (signal_pending(current)) {
  158. spin_unlock_irqrestore(&f->lock, flags);
  159. return -ERESTARTSYS;
  160. }
  161. }
  162. spin_unlock_irqrestore(&f->lock, flags);
  163. *cellp = cell;
  164. return 0;
  165. }
  166. void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
  167. struct snd_seq_event_cell *cell)
  168. {
  169. unsigned long flags;
  170. if (cell) {
  171. spin_lock_irqsave(&f->lock, flags);
  172. cell->next = f->head;
  173. f->head = cell;
  174. f->cells++;
  175. spin_unlock_irqrestore(&f->lock, flags);
  176. }
  177. }
  178. /* polling; return non-zero if queue is available */
  179. int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
  180. poll_table *wait)
  181. {
  182. poll_wait(file, &f->input_sleep, wait);
  183. return (f->cells > 0);
  184. }
  185. /* change the size of pool; all old events are removed */
  186. int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
  187. {
  188. unsigned long flags;
  189. struct snd_seq_pool *newpool, *oldpool;
  190. struct snd_seq_event_cell *cell, *next, *oldhead;
  191. snd_assert(f != NULL && f->pool != NULL, return -EINVAL);
  192. /* allocate new pool */
  193. newpool = snd_seq_pool_new(poolsize);
  194. if (newpool == NULL)
  195. return -ENOMEM;
  196. if (snd_seq_pool_init(newpool) < 0) {
  197. snd_seq_pool_delete(&newpool);
  198. return -ENOMEM;
  199. }
  200. spin_lock_irqsave(&f->lock, flags);
  201. /* remember old pool */
  202. oldpool = f->pool;
  203. oldhead = f->head;
  204. /* exchange pools */
  205. f->pool = newpool;
  206. f->head = NULL;
  207. f->tail = NULL;
  208. f->cells = 0;
  209. /* NOTE: overflow flag is not cleared */
  210. spin_unlock_irqrestore(&f->lock, flags);
  211. /* release cells in old pool */
  212. for (cell = oldhead; cell; cell = next) {
  213. next = cell->next;
  214. snd_seq_cell_free(cell);
  215. }
  216. snd_seq_pool_delete(&oldpool);
  217. return 0;
  218. }