seq_fifo.c

/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"

/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f == NULL) {
		snd_printd("malloc failed for snd_seq_fifo_new()\n");
		return NULL;
	}

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

/* delete fifo and release all of its resources */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	snd_assert(fifo != NULL, return);
	f = *fifo;
	snd_assert(f != NULL, return);
	*fifo = NULL;

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}
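
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * is never compiled): the basic lifecycle of a FIFO as this API exposes it.
 * The pool size of 500 is an arbitrary example value and
 * example_fifo_lifecycle() is a hypothetical helper, not something the
 * sequencer core defines.
 */
#if 0
static int example_fifo_lifecycle(void)
{
	struct snd_seq_fifo *fifo;

	fifo = snd_seq_fifo_new(500);	/* pool with room for 500 event cells */
	if (fifo == NULL)
		return -ENOMEM;

	/* ... producers call snd_seq_fifo_event_in() and consumers call
	 * snd_seq_fifo_cell_out() while the fifo is live ... */

	snd_seq_fifo_delete(&fifo);	/* drains the queue, frees pool and struct */
	return 0;
}
#endif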

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irqrestore(&f->lock, flags);
}

/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	snd_assert(f != NULL, return -EINVAL);

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
	if (err < 0) {
		if (err == -ENOMEM)
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cell to fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wake up client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}
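
/*
 * Illustrative sketch (not part of the original file): how a producer might
 * push an event and interpret the result.  example_enqueue() is hypothetical;
 * the error values are the ones actually returned above.
 */
#if 0
static int example_enqueue(struct snd_seq_fifo *fifo, struct snd_seq_event *ev)
{
	int err = snd_seq_fifo_event_in(fifo, ev);

	if (err == -ENOMEM) {
		/* pool exhausted: the event was dropped and the overflow
		 * counter was incremented so readers can notice the loss */
		return err;
	}
	if (err < 0)
		return err;	/* duplicating the event into a cell failed */
	return 0;		/* event queued; any sleeping reader was woken */
}
#endif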

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	if ((cell = f->head) != NULL) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo and hand it to the caller for copying to user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_t wait;

	snd_assert(f != NULL, return -EINVAL);

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irq(&f->lock);
		schedule();
		spin_lock_irq(&f->lock);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}
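
/*
 * Illustrative sketch (not part of the original file): a consumer draining
 * one cell.  example_read_one() is hypothetical; it assumes the caller
 * processes cell->event and then returns the cell to the pool with
 * snd_seq_cell_free(), just as snd_seq_fifo_clear() above does.
 */
#if 0
static int example_read_one(struct snd_seq_fifo *fifo, int nonblock)
{
	struct snd_seq_event_cell *cell;
	int err;

	err = snd_seq_fifo_cell_out(fifo, &cell, nonblock);
	if (err < 0)
		return err;	/* -EAGAIN (nonblock) or -ERESTARTSYS (signal) */

	/* ... copy or deliver cell->event here ... */

	snd_seq_cell_free(cell);	/* return the cell to the pool */
	return 0;
}
#endif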

/* put a dequeued cell back to the head of the fifo */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		/* the fifo may have been drained empty meanwhile;
		 * restore the tail pointer so later appends stay linked */
		if (!f->tail)
			f->tail = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}
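
/*
 * Illustrative sketch (not part of the original file): the dequeue/putback
 * pattern.  If handing the event to its destination fails transiently, the
 * cell can be pushed back so the event is not lost.  example_try_deliver()
 * and deliver() are both hypothetical names.
 */
#if 0
static int example_try_deliver(struct snd_seq_fifo *fifo)
{
	struct snd_seq_event_cell *cell;
	int err;

	err = snd_seq_fifo_cell_out(fifo, &cell, 1);	/* non-blocking */
	if (err < 0)
		return err;

	if (deliver(&cell->event) < 0) {
		snd_seq_fifo_cell_putback(fifo, cell);	/* keep it for a retry */
		return -EAGAIN;
	}
	snd_seq_cell_free(cell);
	return 0;
}
#endif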

/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}
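
/*
 * Illustrative sketch (not part of the original file): how a ->poll file
 * operation could report readability based on the helper above.
 * example_poll() is hypothetical; POLLIN | POLLRDNORM is the conventional
 * "data ready to read" mask.
 */
#if 0
static unsigned int example_poll(struct snd_seq_fifo *fifo, struct file *file,
				 poll_table *wait)
{
	unsigned int mask = 0;

	if (snd_seq_fifo_poll_wait(fifo, file, wait))
		mask |= POLLIN | POLLRDNORM;	/* events are waiting */
	return mask;
}
#endif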

/* change the size of the pool; all queued events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	unsigned long flags;
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	snd_assert(f != NULL && f->pool != NULL, return -EINVAL);

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irqsave(&f->lock, flags);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irqrestore(&f->lock, flags);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
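
/*
 * Illustrative sketch (not part of the original file): resizing discards
 * every queued event, so it is normally done only while reconfiguring a
 * client rather than on a live stream.  example_set_pool_size() is
 * hypothetical.  On -ENOMEM the old pool and its queued events are left
 * untouched, as the code above shows.
 */
#if 0
static int example_set_pool_size(struct snd_seq_fifo *fifo, int new_size)
{
	int err = snd_seq_fifo_resize(fifo, new_size);

	if (err < 0)
		return err;	/* allocating or initializing the new pool failed */
	/* note: the overflow counter is deliberately preserved */
	return 0;
}
#endif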