industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

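/*
 * Illustrative sketch (not part of this driver): a userspace consumer
 * normally poll()s the buffer character device and then read()s whole
 * scans.  The device path and scan size below are assumptions made for
 * the example only.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char scan[16];
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, scan, sizeof(scan));
 */
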
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

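/*
 * Example of the resulting sysfs format (hypothetical channel): a signed
 * 12-bit sample stored little-endian in 16 bits with no shift reads back
 * from the corresponding scan_elements *_type attribute as:
 *
 *	le:s12/16>>0
 */
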
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

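/*
 * Illustrative driver-side usage (a sketch; "my_channels" and the error
 * label are assumptions, not part of this file).  After attaching a
 * buffer to indio_dev->buffer, a driver registers the scan element
 * attributes for its channel array, typically before iio_device_register():
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */
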
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

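/*
 * Worked example (hypothetical scan): one 16-bit channel enabled plus the
 * 64-bit timestamp.  The channel occupies bytes 0-1; the timestamp storage
 * is 8 bytes, so ALIGN(2, 8) pads to offset 8 and the timestamp occupies
 * bytes 8-15, giving a total scan size of 16 bytes.
 */
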
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

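/*
 * Illustrative in-kernel usage (a sketch; the buffer pointer "cb_buff" is
 * an assumption, e.g. a buffer owned by some in-kernel consumer):
 *
 *	ret = iio_update_buffers(indio_dev, cb_buff, NULL);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, cb_buff);
 *
 * The first call inserts cb_buff into the device's buffer list, the second
 * removes it again; other active buffers are left running.
 */
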
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

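/*
 * A driver that can only sample one channel at a time would typically hook
 * this helper into its buffer setup ops (illustrative sketch; the ops
 * structure name is an assumption):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */
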
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

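/*
 * Worked example (hypothetical 16-bit channels): suppose the device-level
 * scan holds channels 0, 2 and 3 at byte offsets 0, 2 and 4, but a given
 * buffer only enabled channels 2 and 3.  Two table entries such as
 * { .from = 2, .to = 0, .length = 2 } and { .from = 4, .to = 2, .length = 2 }
 * would repack each incoming scan into that buffer's 4-byte bounce buffer,
 * dropping channel 0.
 */
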
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

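/*
 * Illustrative producer-side sketch (the driver names "my_state",
 * "my_read_scan" and "scan_buf" are assumptions, not from this file): a
 * trigger handler typically fills a scan-sized array owned by the driver
 * and pushes it to all attached buffers in one call.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		my_read_scan(st, st->scan_buf);
 *		iio_push_to_buffers(indio_dev, st->scan_buf);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
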
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
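
/*
 * Illustrative pairing (a sketch; the surrounding state structure "st" is
 * an assumption): code that stores a buffer pointer beyond the current
 * call should take a reference and drop it when the pointer is discarded.
 *
 *	st->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(st->buffer);
 *	st->buffer = NULL;
 */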