industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure for the character device
 * @buf:	destination buffer in userspace
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
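/*
 * Illustrative sketch of the convention noted above: a buffer
 * implementation is expected to embed struct iio_buffer as its first
 * member, so a pointer to the implementation doubles as a pointer to the
 * generic buffer.  The names below (struct my_hw_buffer, hw_ring) are
 * hypothetical:
 *
 *	struct my_hw_buffer {
 *		struct iio_buffer buffer;	(must be the first member)
 *		void *hw_ring;
 *	};
 *
 * The implementation can then recover its private structure from a plain
 * struct iio_buffer pointer with
 * container_of(rb, struct my_hw_buffer, buffer).
 */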
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure for the character device
 * @wait:	poll table supplied by the poll() core
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
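/*
 * Typical consumer-side pattern (hypothetical userspace sketch, not part
 * of the kernel build): open the character device, poll() until POLLIN is
 * reported, then read whole scans.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	read(fd, scan, scan_bytes);
 *
 * The device path and scan size depend on the particular device and on
 * which channels are enabled in scan_elements.
 */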
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
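/*
 * The "type" attribute produced above encodes the scan element layout as
 * <endianness>:<sign><realbits>/<storagebits>>><shift>.  For example, a
 * 12-bit signed sample stored little-endian in 16 bits with a 4-bit right
 * shift would read back as "le:s12/16>>4" (illustrative values).
 */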
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
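/*
 * Illustrative result of the registration above: for a device with one
 * indexed voltage channel plus a timestamp, the scan_elements group would
 * typically contain
 *
 *	scan_elements/in_voltage0_en
 *	scan_elements/in_voltage0_index
 *	scan_elements/in_voltage0_type
 *	scan_elements/in_timestamp_en
 *	scan_elements/in_timestamp_index
 *	scan_elements/in_timestamp_type
 *
 * Exact attribute names depend on the channel specification the driver
 * supplies.
 */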
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
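/*
 * The loop above walks indio_dev->available_scan_masks, which a driver
 * supplies as a zero-terminated array of bitmasks.  A hypothetical sketch
 * for a three-channel device that can only capture all channels at once:
 *
 *	static const unsigned long my_scan_masks[] = { 0x7, 0 };
 *	...
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * A requested mask is accepted if it is a subset of any entry; the first
 * matching entry becomes the active scan mask.
 */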
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
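/*
 * Worked example of the sizing above (illustrative numbers): two 16-bit
 * channels followed by a 64-bit timestamp give
 *
 *	bytes = 0 -> ALIGN(0, 2) = 0, + 2 = 2	(first channel)
 *	bytes = 2 -> ALIGN(2, 2) = 2, + 2 = 4	(second channel)
 *	bytes = 4 -> ALIGN(4, 8) = 8, + 8 = 16	(timestamp)
 *
 * so each scan occupies 16 bytes, with 4 bytes of padding before the
 * timestamp to keep it naturally aligned.
 */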
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);
			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
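/*
 * A driver whose hardware can only sample one channel at a time would
 * typically plug this helper into its buffer setup ops (hypothetical
 * sketch; my_buffer_setup_ops is an illustrative name):
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 *	...
 *	indio_dev->setup_ops = &my_buffer_setup_ops;
 */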
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN_ON("Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
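/*
 * Typical producer-side usage (hypothetical sketch of a driver's trigger
 * handler): assemble one scan in a driver-owned array sized for
 * indio_dev->scan_bytes and hand it to every attached buffer.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		... read the enabled channels into data[] ...
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */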
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);
	return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);