/* industrialio-event.c */
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */
#include <linux/anon_inodes.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

#include "iio_core.h"
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	/* Fixed-size FIFO of pending events; capacity 16 entries. */
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);
	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
  41. int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
  42. {
  43. struct iio_event_interface *ev_int = indio_dev->event_interface;
  44. struct iio_event_data ev;
  45. unsigned long flags;
  46. int copied;
  47. /* Does anyone care? */
  48. spin_lock_irqsave(&ev_int->wait.lock, flags);
  49. if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
  50. ev.id = ev_code;
  51. ev.timestamp = timestamp;
  52. copied = kfifo_put(&ev_int->det_events, &ev);
  53. if (copied != 0)
  54. wake_up_locked_poll(&ev_int->wait, POLLIN);
  55. }
  56. spin_unlock_irqrestore(&ev_int->wait.lock, flags);
  57. return 0;
  58. }
  59. EXPORT_SYMBOL(iio_push_event);
  60. /**
  61. * iio_event_poll() - poll the event queue to find out if it has data
  62. */
  63. static unsigned int iio_event_poll(struct file *filep,
  64. struct poll_table_struct *wait)
  65. {
  66. struct iio_dev *indio_dev = filep->private_data;
  67. struct iio_event_interface *ev_int = indio_dev->event_interface;
  68. unsigned int events = 0;
  69. if (!indio_dev->info)
  70. return -ENODEV;
  71. poll_wait(filep, &ev_int->wait, wait);
  72. spin_lock_irq(&ev_int->wait.lock);
  73. if (!kfifo_is_empty(&ev_int->det_events))
  74. events = POLLIN | POLLRDNORM;
  75. spin_unlock_irq(&ev_int->wait.lock);
  76. return events;
  77. }
/*
 * iio_event_chrdev_read() - read queued events into a userspace buffer
 *
 * Blocks (unless O_NONBLOCK) until at least one event is queued or the
 * device goes away.  Returns the number of bytes copied, -EAGAIN when
 * non-blocking and empty, -EINVAL for a too-small buffer, -ENODEV when
 * the device has been unregistered, or a signal/copy error code.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
char __user *buf,
size_t count,
loff_t *f_ps)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
unsigned int copied;
int ret;
if (!indio_dev->info)
return -ENODEV;
/* Refuse buffers that cannot hold even one whole event. */
if (count < sizeof(struct iio_event_data))
return -EINVAL;
spin_lock_irq(&ev_int->wait.lock);
if (kfifo_is_empty(&ev_int->det_events)) {
if (filep->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto error_unlock;
}
/* Blocking on device; waiting for something to be there */
/* Drops ev_int->wait.lock while sleeping, reacquires on wakeup. */
ret = wait_event_interruptible_locked_irq(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events) ||
indio_dev->info == NULL);
if (ret)
goto error_unlock;
/* Woken by device removal rather than a new event. */
if (indio_dev->info == NULL) {
ret = -ENODEV;
goto error_unlock;
}
/* Single access device so no one else can get the data */
}
ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
error_unlock:
spin_unlock_irq(&ev_int->wait.lock);
return ret ? ret : copied;
}
  114. static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
  115. {
  116. struct iio_dev *indio_dev = filep->private_data;
  117. struct iio_event_interface *ev_int = indio_dev->event_interface;
  118. spin_lock_irq(&ev_int->wait.lock);
  119. __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
  120. /*
  121. * In order to maintain a clean state for reopening,
  122. * clear out any awaiting events. The mask will prevent
  123. * any new __iio_push_event calls running.
  124. */
  125. kfifo_reset_out(&ev_int->det_events);
  126. spin_unlock_irq(&ev_int->wait.lock);
  127. iio_device_put(indio_dev);
  128. return 0;
  129. }
  130. static const struct file_operations iio_event_chrdev_fileops = {
  131. .read = iio_event_chrdev_read,
  132. .poll = iio_event_poll,
  133. .release = iio_event_chrdev_release,
  134. .owner = THIS_MODULE,
  135. .llseek = noop_llseek,
  136. };
  137. int iio_event_getfd(struct iio_dev *indio_dev)
  138. {
  139. struct iio_event_interface *ev_int = indio_dev->event_interface;
  140. int fd;
  141. if (ev_int == NULL)
  142. return -ENODEV;
  143. spin_lock_irq(&ev_int->wait.lock);
  144. if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
  145. spin_unlock_irq(&ev_int->wait.lock);
  146. return -EBUSY;
  147. }
  148. spin_unlock_irq(&ev_int->wait.lock);
  149. iio_device_get(indio_dev);
  150. fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
  151. indio_dev, O_RDONLY | O_CLOEXEC);
  152. if (fd < 0) {
  153. spin_lock_irq(&ev_int->wait.lock);
  154. __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
  155. spin_unlock_irq(&ev_int->wait.lock);
  156. iio_device_put(indio_dev);
  157. }
  158. return fd;
  159. }
/* sysfs name fragments for event types, indexed by enum iio_event_type. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

/* sysfs name fragments for event directions, indexed by enum iio_event_direction. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

/* sysfs name fragments for event info elements, indexed by enum iio_event_info. */
static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
};
/*
 * For new-style event attributes the iio_dev_attr address field packs
 * the event_spec index into the lower 16 bits and the iio_event_info
 * element into bits 16..31 (set up in iio_device_add_event()).
 */
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}
  189. static ssize_t iio_ev_state_store(struct device *dev,
  190. struct device_attribute *attr,
  191. const char *buf,
  192. size_t len)
  193. {
  194. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  195. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  196. int ret;
  197. bool val;
  198. ret = strtobool(buf, &val);
  199. if (ret < 0)
  200. return ret;
  201. if (indio_dev->info->write_event_config)
  202. ret = indio_dev->info->write_event_config(indio_dev,
  203. this_attr->address, val);
  204. else
  205. ret = indio_dev->info->write_event_config_new(indio_dev,
  206. this_attr->c, iio_ev_attr_type(this_attr),
  207. iio_ev_attr_dir(this_attr), val);
  208. return (ret < 0) ? ret : len;
  209. }
  210. static ssize_t iio_ev_state_show(struct device *dev,
  211. struct device_attribute *attr,
  212. char *buf)
  213. {
  214. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  215. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  216. int val;
  217. if (indio_dev->info->read_event_config)
  218. val = indio_dev->info->read_event_config(indio_dev,
  219. this_attr->address);
  220. else
  221. val = indio_dev->info->read_event_config_new(indio_dev,
  222. this_attr->c, iio_ev_attr_type(this_attr),
  223. iio_ev_attr_dir(this_attr));
  224. if (val < 0)
  225. return val;
  226. else
  227. return sprintf(buf, "%d\n", val);
  228. }
  229. static ssize_t iio_ev_value_show(struct device *dev,
  230. struct device_attribute *attr,
  231. char *buf)
  232. {
  233. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  234. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  235. int val, val2;
  236. int ret;
  237. if (indio_dev->info->read_event_value) {
  238. ret = indio_dev->info->read_event_value(indio_dev,
  239. this_attr->address, &val);
  240. if (ret < 0)
  241. return ret;
  242. return sprintf(buf, "%d\n", val);
  243. } else {
  244. ret = indio_dev->info->read_event_value_new(indio_dev,
  245. this_attr->c, iio_ev_attr_type(this_attr),
  246. iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
  247. &val, &val2);
  248. if (ret < 0)
  249. return ret;
  250. return iio_format_value(buf, ret, val, val2);
  251. }
  252. }
  253. static ssize_t iio_ev_value_store(struct device *dev,
  254. struct device_attribute *attr,
  255. const char *buf,
  256. size_t len)
  257. {
  258. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  259. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  260. int val, val2;
  261. int ret;
  262. if (!indio_dev->info->write_event_value &&
  263. !indio_dev->info->write_event_value_new)
  264. return -EINVAL;
  265. if (indio_dev->info->write_event_value) {
  266. ret = kstrtoint(buf, 10, &val);
  267. if (ret)
  268. return ret;
  269. ret = indio_dev->info->write_event_value(indio_dev,
  270. this_attr->address, val);
  271. } else {
  272. ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
  273. if (ret)
  274. return ret;
  275. ret = indio_dev->info->write_event_value_new(indio_dev,
  276. this_attr->c, iio_ev_attr_type(this_attr),
  277. iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
  278. val, val2);
  279. }
  280. if (ret < 0)
  281. return ret;
  282. return len;
  283. }
  284. static int iio_device_add_event(struct iio_dev *indio_dev,
  285. const struct iio_chan_spec *chan, unsigned int spec_index,
  286. enum iio_event_type type, enum iio_event_direction dir,
  287. enum iio_shared_by shared_by, const unsigned long *mask)
  288. {
  289. ssize_t (*show)(struct device *, struct device_attribute *, char *);
  290. ssize_t (*store)(struct device *, struct device_attribute *,
  291. const char *, size_t);
  292. unsigned int attrcount = 0;
  293. unsigned int i;
  294. char *postfix;
  295. int ret;
  296. for_each_set_bit(i, mask, sizeof(*mask)) {
  297. postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
  298. iio_ev_type_text[type], iio_ev_dir_text[dir],
  299. iio_ev_info_text[i]);
  300. if (postfix == NULL)
  301. return -ENOMEM;
  302. if (i == IIO_EV_INFO_ENABLE) {
  303. show = iio_ev_state_show;
  304. store = iio_ev_state_store;
  305. } else {
  306. show = iio_ev_value_show;
  307. store = iio_ev_value_store;
  308. }
  309. ret = __iio_add_chan_devattr(postfix, chan, show, store,
  310. (i << 16) | spec_index, shared_by, &indio_dev->dev,
  311. &indio_dev->event_interface->dev_attr_list);
  312. kfree(postfix);
  313. if (ret)
  314. return ret;
  315. attrcount++;
  316. }
  317. return attrcount;
  318. }
  319. static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
  320. struct iio_chan_spec const *chan)
  321. {
  322. int ret = 0, i, attrcount = 0;
  323. enum iio_event_direction dir;
  324. enum iio_event_type type;
  325. for (i = 0; i < chan->num_event_specs; i++) {
  326. type = chan->event_spec[i].type;
  327. dir = chan->event_spec[i].dir;
  328. ret = iio_device_add_event(indio_dev, chan, i, type, dir,
  329. IIO_SEPARATE, &chan->event_spec[i].mask_separate);
  330. if (ret < 0)
  331. goto error_ret;
  332. attrcount += ret;
  333. ret = iio_device_add_event(indio_dev, chan, i, type, dir,
  334. IIO_SHARED_BY_TYPE,
  335. &chan->event_spec[i].mask_shared_by_type);
  336. if (ret < 0)
  337. goto error_ret;
  338. attrcount += ret;
  339. ret = iio_device_add_event(indio_dev, chan, i, type, dir,
  340. IIO_SHARED_BY_DIR,
  341. &chan->event_spec[i].mask_shared_by_dir);
  342. if (ret < 0)
  343. goto error_ret;
  344. attrcount += ret;
  345. ret = iio_device_add_event(indio_dev, chan, i, type, dir,
  346. IIO_SHARED_BY_ALL,
  347. &chan->event_spec[i].mask_shared_by_all);
  348. if (ret < 0)
  349. goto error_ret;
  350. attrcount += ret;
  351. }
  352. ret = attrcount;
  353. error_ret:
  354. return ret;
  355. }
/*
 * iio_device_add_event_sysfs_old() - create attributes from the legacy
 * chan->event_mask encoding.  Each set bit encodes one (type,
 * direction) pair: bit = type * IIO_EV_DIR_MAX + direction.  For every
 * pair an "_en" and a "_value" attribute are created, with the packed
 * event code as the attribute address.  Returns the attribute count or
 * a negative errno.
 */
static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
u64 mask = 0;
char *postfix;
if (!chan->event_mask)
return 0;
for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
iio_ev_type_text[i/IIO_EV_DIR_MAX],
iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
if (postfix == NULL) {
ret = -ENOMEM;
goto error_ret;
}
/* Build the event code matching what the driver will push. */
if (chan->modified)
mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
i/IIO_EV_DIR_MAX,
i%IIO_EV_DIR_MAX);
else if (chan->differential)
mask = IIO_EVENT_CODE(chan->type,
0, 0,
i%IIO_EV_DIR_MAX,
i/IIO_EV_DIR_MAX,
0,
chan->channel,
chan->channel2);
else
mask = IIO_UNMOD_EVENT_CODE(chan->type,
chan->channel,
i/IIO_EV_DIR_MAX,
i%IIO_EV_DIR_MAX);
ret = __iio_add_chan_devattr(postfix,
chan,
&iio_ev_state_show,
iio_ev_state_store,
mask,
0,
&indio_dev->dev,
&indio_dev->event_interface->
dev_attr_list);
kfree(postfix);
if (ret)
goto error_ret;
attrcount++;
/* Companion "_value" attribute for the same event code. */
postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
iio_ev_type_text[i/IIO_EV_DIR_MAX],
iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
if (postfix == NULL) {
ret = -ENOMEM;
goto error_ret;
}
ret = __iio_add_chan_devattr(postfix, chan,
iio_ev_value_show,
iio_ev_value_store,
mask,
0,
&indio_dev->dev,
&indio_dev->event_interface->
dev_attr_list);
kfree(postfix);
if (ret)
goto error_ret;
attrcount++;
}
ret = attrcount;
error_ret:
return ret;
}
  426. static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
  427. struct iio_chan_spec const *chan)
  428. {
  429. if (chan->event_mask)
  430. return iio_device_add_event_sysfs_old(indio_dev, chan);
  431. else
  432. return iio_device_add_event_sysfs_new(indio_dev, chan);
  433. }
  434. static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
  435. {
  436. int j, ret, attrcount = 0;
  437. /* Dynically created from the channels array */
  438. for (j = 0; j < indio_dev->num_channels; j++) {
  439. ret = iio_device_add_event_sysfs(indio_dev,
  440. &indio_dev->channels[j]);
  441. if (ret < 0)
  442. return ret;
  443. attrcount += ret;
  444. }
  445. return attrcount;
  446. }
  447. static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
  448. {
  449. int j;
  450. for (j = 0; j < indio_dev->num_channels; j++) {
  451. if (indio_dev->channels[j].event_mask != 0)
  452. return true;
  453. if (indio_dev->channels[j].num_event_specs != 0)
  454. return true;
  455. }
  456. return false;
  457. }
  458. static void iio_setup_ev_int(struct iio_event_interface *ev_int)
  459. {
  460. INIT_KFIFO(ev_int->det_events);
  461. init_waitqueue_head(&ev_int->wait);
  462. }
  463. static const char *iio_event_group_name = "events";
  464. int iio_device_register_eventset(struct iio_dev *indio_dev)
  465. {
  466. struct iio_dev_attr *p;
  467. int ret = 0, attrcount_orig = 0, attrcount, attrn;
  468. struct attribute **attr;
  469. if (!(indio_dev->info->event_attrs ||
  470. iio_check_for_dynamic_events(indio_dev)))
  471. return 0;
  472. indio_dev->event_interface =
  473. kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
  474. if (indio_dev->event_interface == NULL) {
  475. ret = -ENOMEM;
  476. goto error_ret;
  477. }
  478. INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
  479. iio_setup_ev_int(indio_dev->event_interface);
  480. if (indio_dev->info->event_attrs != NULL) {
  481. attr = indio_dev->info->event_attrs->attrs;
  482. while (*attr++ != NULL)
  483. attrcount_orig++;
  484. }
  485. attrcount = attrcount_orig;
  486. if (indio_dev->channels) {
  487. ret = __iio_add_event_config_attrs(indio_dev);
  488. if (ret < 0)
  489. goto error_free_setup_event_lines;
  490. attrcount += ret;
  491. }
  492. indio_dev->event_interface->group.name = iio_event_group_name;
  493. indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
  494. sizeof(indio_dev->event_interface->group.attrs[0]),
  495. GFP_KERNEL);
  496. if (indio_dev->event_interface->group.attrs == NULL) {
  497. ret = -ENOMEM;
  498. goto error_free_setup_event_lines;
  499. }
  500. if (indio_dev->info->event_attrs)
  501. memcpy(indio_dev->event_interface->group.attrs,
  502. indio_dev->info->event_attrs->attrs,
  503. sizeof(indio_dev->event_interface->group.attrs[0])
  504. *attrcount_orig);
  505. attrn = attrcount_orig;
  506. /* Add all elements from the list. */
  507. list_for_each_entry(p,
  508. &indio_dev->event_interface->dev_attr_list,
  509. l)
  510. indio_dev->event_interface->group.attrs[attrn++] =
  511. &p->dev_attr.attr;
  512. indio_dev->groups[indio_dev->groupcounter++] =
  513. &indio_dev->event_interface->group;
  514. return 0;
  515. error_free_setup_event_lines:
  516. iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
  517. kfree(indio_dev->event_interface);
  518. error_ret:
  519. return ret;
  520. }
  521. /**
  522. * iio_device_wakeup_eventset - Wakes up the event waitqueue
  523. * @indio_dev: The IIO device
  524. *
  525. * Wakes up the event waitqueue used for poll() and blocking read().
  526. * Should usually be called when the device is unregistered.
  527. */
  528. void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
  529. {
  530. if (indio_dev->event_interface == NULL)
  531. return;
  532. wake_up(&indio_dev->event_interface->wait);
  533. }
  534. void iio_device_unregister_eventset(struct iio_dev *indio_dev)
  535. {
  536. if (indio_dev->event_interface == NULL)
  537. return;
  538. iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
  539. kfree(indio_dev->event_interface->group.attrs);
  540. kfree(indio_dev->event_interface);
  541. }