industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer in userspace
 * @n:		First n bytes to read
 * @f_ps:	Offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer to which the wait queue is added
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
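
/*
 * Example (sketch, hypothetical names): a buffer implementation embeds
 * struct iio_buffer as its *first* member, as the chrdev read path above
 * assumes, and calls iio_buffer_init() on it during allocation:
 *
 *	struct my_ring_buffer {
 *		struct iio_buffer buf;		// must be the first member
 *		u8 *data;
 *	};
 *
 *	struct iio_buffer *my_ring_allocate(void)
 *	{
 *		struct my_ring_buffer *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buf);
 *		ring->buf.access = &my_ring_access_funcs;	// hypothetical ops
 *		return &ring->buf;
 *	}
 */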
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
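
/*
 * Example (sketch, hypothetical driver): iio_buffer_register() is typically
 * called at probe time, after a buffer has been allocated and attached to the
 * device, with the driver's channel array; iio_buffer_unregister() sits on
 * the matching remove path:
 *
 *	static int my_adc_probe(struct spi_device *spi)
 *	{
 *		struct iio_dev *indio_dev = ...;	// allocated earlier
 *		int ret;
 *
 *		indio_dev->buffer = my_ring_allocate();	// hypothetical helper
 *		if (!indio_dev->buffer)
 *			return -ENOMEM;
 *		ret = iio_buffer_register(indio_dev, my_adc_channels,
 *					  ARRAY_SIZE(my_adc_channels));
 *		if (ret)
 *			return ret;
 *		return iio_device_register(indio_dev);
 *	}
 */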
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note: NULL is used as the error indicator, since a NULL match can never be
 * a valid scan mask.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
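
/*
 * Worked example (illustrative): for a scan of three 16-bit channels
 * (storagebits = 16, so length = 2) plus a 64-bit timestamp, the loop above
 * gives bytes = 2 + 2 + 2 = 6; ALIGN(6, 8) then pads to 8 before the 8-byte
 * timestamp is added, for a total of 16 bytes per scan.
 */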
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
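
/*
 * Example (sketch): in-kernel consumers enable or disable a buffer by passing
 * it as the insert or the remove argument of iio_update_buffers(), much as
 * the sysfs enable handler below does:
 *
 *	// attach (enable) a buffer
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *
 *	// detach (disable) it again
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);
 *
 * "my_buffer" stands for a hypothetical struct iio_buffer pointer already
 * registered with the device.
 */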
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
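
/*
 * Example (sketch, hypothetical driver): a device that can only sample one
 * channel at a time plugs this helper into its buffer setup ops, alongside
 * iio_sw_buffer_preenable() above:
 *
 *	static const struct iio_buffer_setup_ops my_adc_buffer_setup_ops = {
 *		.preenable		= &iio_sw_buffer_preenable,
 *		.validate_scan_mask	= &iio_validate_scan_mask_onehot,
 *	};
 *
 *	// at probe time
 *	indio_dev->setup_ops = &my_adc_buffer_setup_ops;
 */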
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
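
/*
 * Example (sketch, hypothetical trigger handler): a driver fills one scan's
 * worth of data (indio_dev->scan_bytes, laid out per the active scan mask)
 * and hands it to iio_push_to_buffers(); the core then demuxes it into every
 * attached buffer:
 *
 *	static irqreturn_t my_adc_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];				// sized for the example only
 *
 *		my_adc_read_scan(indio_dev, scan);	// hypothetical helper
 *		iio_push_to_buffers(indio_dev, scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */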
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
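
/*
 * Example (sketch): code that keeps its own pointer to a buffer beyond the
 * scope in which it obtained it takes a reference first and drops it when
 * done; both helpers tolerate a NULL buffer:
 *
 *	state->buffer = iio_buffer_get(indio_dev->buffer);	// hold a reference
 *	...
 *	iio_buffer_put(state->buffer);				// drop it; may free
 */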