/* v4l2-event.c */
  1. /*
  2. * v4l2-event.c
  3. *
  4. * V4L2 events.
  5. *
  6. * Copyright (C) 2009--2010 Nokia Corporation.
  7. *
  8. * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * version 2 as published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful, but
  15. * WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  22. * 02110-1301 USA
  23. */
  24. #include <media/v4l2-dev.h>
  25. #include <media/v4l2-fh.h>
  26. #include <media/v4l2-event.h>
  27. #include <linux/sched.h>
  28. #include <linux/slab.h>
  29. int v4l2_event_init(struct v4l2_fh *fh)
  30. {
  31. fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
  32. if (fh->events == NULL)
  33. return -ENOMEM;
  34. init_waitqueue_head(&fh->events->wait);
  35. INIT_LIST_HEAD(&fh->events->free);
  36. INIT_LIST_HEAD(&fh->events->available);
  37. INIT_LIST_HEAD(&fh->events->subscribed);
  38. fh->events->sequence = -1;
  39. return 0;
  40. }
  41. int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
  42. {
  43. struct v4l2_events *events = fh->events;
  44. unsigned long flags;
  45. if (!events) {
  46. WARN_ON(1);
  47. return -ENOMEM;
  48. }
  49. while (events->nallocated < n) {
  50. struct v4l2_kevent *kev;
  51. kev = kzalloc(sizeof(*kev), GFP_KERNEL);
  52. if (kev == NULL)
  53. return -ENOMEM;
  54. spin_lock_irqsave(&fh->vdev->fh_lock, flags);
  55. list_add_tail(&kev->list, &events->free);
  56. events->nallocated++;
  57. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  58. }
  59. return 0;
  60. }
  61. EXPORT_SYMBOL_GPL(v4l2_event_alloc);
  62. #define list_kfree(list, type, member) \
  63. while (!list_empty(list)) { \
  64. type *hi; \
  65. hi = list_first_entry(list, type, member); \
  66. list_del(&hi->member); \
  67. kfree(hi); \
  68. }
/*
 * v4l2_event_free - release all event resources attached to @fh
 *
 * Frees every preallocated and still-queued kevent, every subscription,
 * and the v4l2_events bookkeeping structure itself, then clears
 * fh->events so a repeated call is a harmless no-op.
 *
 * NOTE(review): the list walks take no lock, so this looks intended for
 * file-handle teardown when no other context can reach fh->events —
 * confirm against the callers.
 */
void v4l2_event_free(struct v4l2_fh *fh)
{
struct v4l2_events *events = fh->events;
if (!events)
return;
list_kfree(&events->free, struct v4l2_kevent, list);
list_kfree(&events->available, struct v4l2_kevent, list);
list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
kfree(events);
/* Guard against double free / stale use after teardown. */
fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);
  81. static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
  82. {
  83. struct v4l2_events *events = fh->events;
  84. struct v4l2_kevent *kev;
  85. unsigned long flags;
  86. spin_lock_irqsave(&fh->vdev->fh_lock, flags);
  87. if (list_empty(&events->available)) {
  88. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  89. return -ENOENT;
  90. }
  91. WARN_ON(events->navailable == 0);
  92. kev = list_first_entry(&events->available, struct v4l2_kevent, list);
  93. list_move(&kev->list, &events->free);
  94. events->navailable--;
  95. kev->event.pending = events->navailable;
  96. *event = kev->event;
  97. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  98. return 0;
  99. }
  100. int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
  101. int nonblocking)
  102. {
  103. struct v4l2_events *events = fh->events;
  104. int ret;
  105. if (nonblocking)
  106. return __v4l2_event_dequeue(fh, event);
  107. do {
  108. ret = wait_event_interruptible(events->wait,
  109. events->navailable != 0);
  110. if (ret < 0)
  111. return ret;
  112. ret = __v4l2_event_dequeue(fh, event);
  113. } while (ret == -ENOENT);
  114. return ret;
  115. }
/*
 * Return the subscription entry for event @type on @fh, or NULL if this
 * file handle is not subscribed to it.
 *
 * Caller must hold fh->vdev->fh_lock!  (The WARN_ON below checks that
 * lock; the subscribed list is only ever modified under it.)
 */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
struct v4l2_fh *fh, u32 type)
{
struct v4l2_events *events = fh->events;
struct v4l2_subscribed_event *sev;
WARN_ON(!spin_is_locked(&fh->vdev->fh_lock));
list_for_each_entry(sev, &events->subscribed, list) {
if (sev->type == type)
return sev;
}
return NULL;
}
/*
 * v4l2_event_queue - raise event @ev on every file handle of @vdev that
 * subscribed to ev->type.
 *
 * The timestamp is sampled once, before taking the lock, so all handles
 * see the same time for the same event.  The per-handle sequence counter
 * is incremented even when no free kevent is left and the event must be
 * dropped; a reader can therefore detect lost events from gaps in
 * v4l2_event.sequence.
 *
 * Runs entirely under vdev->fh_lock with IRQs disabled.
 */
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
unsigned long flags;
struct timespec timestamp;
ktime_get_ts(&timestamp);
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list) {
struct v4l2_events *events = fh->events;
struct v4l2_kevent *kev;
/* Are we subscribed? */
if (!v4l2_event_subscribed(fh, ev->type))
continue;
/* Increase event sequence number on fh. */
events->sequence++;
/* Do we have any free events? */
if (list_empty(&events->free))
continue;
/* Take one and fill it. */
kev = list_first_entry(&events->free, struct v4l2_kevent, list);
kev->event.type = ev->type;
kev->event.u = ev->u;
kev->event.timestamp = timestamp;
kev->event.sequence = events->sequence;
list_move_tail(&kev->list, &events->available);
events->navailable++;
/* Wake all sleepers in v4l2_event_dequeue() and pollers. */
wake_up_all(&events->wait);
}
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
  160. int v4l2_event_pending(struct v4l2_fh *fh)
  161. {
  162. return fh->events->navailable;
  163. }
  164. EXPORT_SYMBOL_GPL(v4l2_event_pending);
  165. int v4l2_event_subscribe(struct v4l2_fh *fh,
  166. struct v4l2_event_subscription *sub)
  167. {
  168. struct v4l2_events *events = fh->events;
  169. struct v4l2_subscribed_event *sev;
  170. unsigned long flags;
  171. if (fh->events == NULL) {
  172. WARN_ON(1);
  173. return -ENOMEM;
  174. }
  175. sev = kmalloc(sizeof(*sev), GFP_KERNEL);
  176. if (!sev)
  177. return -ENOMEM;
  178. spin_lock_irqsave(&fh->vdev->fh_lock, flags);
  179. if (v4l2_event_subscribed(fh, sub->type) == NULL) {
  180. INIT_LIST_HEAD(&sev->list);
  181. sev->type = sub->type;
  182. list_add(&sev->list, &events->subscribed);
  183. sev = NULL;
  184. }
  185. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  186. kfree(sev);
  187. return 0;
  188. }
  189. EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
  190. static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
  191. {
  192. struct v4l2_events *events = fh->events;
  193. struct v4l2_subscribed_event *sev;
  194. unsigned long flags;
  195. do {
  196. sev = NULL;
  197. spin_lock_irqsave(&fh->vdev->fh_lock, flags);
  198. if (!list_empty(&events->subscribed)) {
  199. sev = list_first_entry(&events->subscribed,
  200. struct v4l2_subscribed_event, list);
  201. list_del(&sev->list);
  202. }
  203. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  204. kfree(sev);
  205. } while (sev);
  206. }
  207. int v4l2_event_unsubscribe(struct v4l2_fh *fh,
  208. struct v4l2_event_subscription *sub)
  209. {
  210. struct v4l2_subscribed_event *sev;
  211. unsigned long flags;
  212. if (sub->type == V4L2_EVENT_ALL) {
  213. v4l2_event_unsubscribe_all(fh);
  214. return 0;
  215. }
  216. spin_lock_irqsave(&fh->vdev->fh_lock, flags);
  217. sev = v4l2_event_subscribed(fh, sub->type);
  218. if (sev != NULL)
  219. list_del(&sev->list);
  220. spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
  221. kfree(sev);
  222. return 0;
  223. }
  224. EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);