/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
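
/*
 * Allocate the per-filehandle event state and initialize the wait
 * queue, the free/available/subscribed lists and the sequence counter.
 * The counter starts at -1 so that the first queued event is number 0.
 */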
int v4l2_event_init(struct v4l2_fh *fh)
{
	fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
	if (fh->events == NULL)
		return -ENOMEM;

	init_waitqueue_head(&fh->events->wait);

	INIT_LIST_HEAD(&fh->events->free);
	INIT_LIST_HEAD(&fh->events->available);
	INIT_LIST_HEAD(&fh->events->subscribed);

	fh->events->sequence = -1;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_init);
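
/*
 * Grow the free list until at least @n kevents have been allocated for
 * this filehandle.  kzalloc() runs with GFP_KERNEL outside the
 * spinlock; only the list update is done under fh->vdev->fh_lock.
 */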
int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
{
	struct v4l2_events *events = fh->events;
	unsigned long flags;

	if (!events) {
		WARN_ON(1);
		return -ENOMEM;
	}

	while (events->nallocated < n) {
		struct v4l2_kevent *kev;

		kev = kzalloc(sizeof(*kev), GFP_KERNEL);
		if (kev == NULL)
			return -ENOMEM;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		list_add_tail(&kev->list, &events->free);
		events->nallocated++;
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);
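
/*
 * Free every entry on @list.  Only used at teardown time, when no
 * other context can reach the lists any more, so no locking is needed.
 */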
#define list_kfree(list, type, member)				\
	while (!list_empty(list)) {				\
		type *hi;					\
		hi = list_first_entry(list, type, member);	\
		list_del(&hi->member);				\
		kfree(hi);					\
	}

void v4l2_event_free(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;

	if (!events)
		return;

	list_kfree(&events->free, struct v4l2_kevent, list);
	list_kfree(&events->available, struct v4l2_kevent, list);
	list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);

	kfree(events);
	fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);
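
/*
 * Move the oldest available event to the free list and copy it into
 * @event.  event.pending reports how many events are still queued
 * after this one.  Returns -ENOENT when nothing is available.
 */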
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&events->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(events->navailable == 0);

	kev = list_first_entry(&events->available, struct v4l2_kevent, list);
	list_move(&kev->list, &events->free);
	events->navailable--;

	kev->event.pending = events->navailable;
	*event = kev->event;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
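
/*
 * Dequeue one event, blocking until one is available unless
 * @nonblocking is set.  The loop retries on -ENOENT because another
 * thread sharing this filehandle may consume the event between the
 * wakeup and the dequeue.
 */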
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	struct v4l2_events *events = fh->events;
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(events->wait,
					       events->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &events->subscribed, list) {
		if (sev->type == type)
			return sev;
	}

	return NULL;
}
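
/*
 * Queue @ev on every filehandle subscribed to ev->type.  The sequence
 * number is bumped even when no free kevent is left, so a gap in the
 * sequence numbers tells userspace that events were dropped.
 */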
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		struct v4l2_events *events = fh->events;
		struct v4l2_kevent *kev;

		/* Are we subscribed? */
		if (!v4l2_event_subscribed(fh, ev->type))
			continue;

		/* Increase event sequence number on fh. */
		events->sequence++;

		/* Do we have any free events? */
		if (list_empty(&events->free))
			continue;

		/* Take one and fill it. */
		kev = list_first_entry(&events->free, struct v4l2_kevent, list);
		kev->event.type = ev->type;
		kev->event.u = ev->u;
		kev->event.timestamp = timestamp;
		kev->event.sequence = events->sequence;
		list_move_tail(&kev->list, &events->available);

		events->navailable++;

		wake_up_all(&events->wait);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
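
/* Poll helper: returns the number of events ready for dequeueing. */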
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->events->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
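
/*
 * Subscribe the filehandle to sub->type.  The entry is allocated
 * before taking the spinlock; if the type turns out to be subscribed
 * already, the duplicate is freed and 0 is still returned.
 */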
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (fh->events == NULL) {
		WARN_ON(1);
		return -ENOMEM;
	}

	sev = kmalloc(sizeof(*sev), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (v4l2_event_subscribed(fh, sub->type) == NULL) {
		INIT_LIST_HEAD(&sev->list);
		sev->type = sub->type;

		list_add(&sev->list, &events->subscribed);
		sev = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
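
/*
 * Drop every subscription on the filehandle.  Entries are removed one
 * at a time so that the spinlock is never held across kfree().
 */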
static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&events->subscribed)) {
			sev = list_first_entry(&events->subscribed,
					       struct v4l2_subscribed_event,
					       list);
			list_del(&sev->list);
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

		kfree(sev);
	} while (sev);
}
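
/*
 * Unsubscribe from a single event type, or from everything when
 * sub->type is V4L2_EVENT_ALL.
 */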
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type);
	if (sev != NULL)
		list_del(&sev->list);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
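
/*
 * Typical call pattern (a minimal sketch; mydrv_irq() and its driver
 * context are hypothetical, only the v4l2_event_* calls are this
 * file's API):
 *
 *	static void mydrv_irq(struct video_device *vdev)
 *	{
 *		struct v4l2_event ev;
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = V4L2_EVENT_VSYNC;
 *		v4l2_event_queue(vdev, &ev);
 *	}
 *
 * The filehandle side pairs v4l2_event_init()/v4l2_event_alloc() at
 * open with v4l2_event_free() at release, and implements
 * VIDIOC_SUBSCRIBE_EVENT and VIDIOC_DQEVENT with
 * v4l2_event_subscribe() and v4l2_event_dequeue().
 */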