buffer_cb.c

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

/* Consumer-side wrapper: an in-kernel buffer that hands each scan to a callback. */
struct iio_cb_buffer {
        struct iio_buffer buffer;
        int (*cb)(u8 *data, void *private);
        void *private;
        struct iio_channel *channels;
};

static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
{
        struct iio_cb_buffer *cb_buff = container_of(buffer,
                                                     struct iio_cb_buffer,
                                                     buffer);

        /* Forward the demuxed scan straight to the consumer's callback. */
        return cb_buff->cb(data, cb_buff->private);
}

static struct iio_buffer_access_funcs iio_cb_access = {
        .store_to = &iio_buffer_cb_store_to,
};

struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                                             int (*cb)(u8 *data,
                                                       void *private),
                                             void *private)
{
        int ret;
        struct iio_cb_buffer *cb_buff;
        struct iio_dev *indio_dev;
        struct iio_channel *chan;

        cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
        if (cb_buff == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        cb_buff->private = private;
        cb_buff->cb = cb;
        cb_buff->buffer.access = &iio_cb_access;
        INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

        /* Resolve every channel mapped to this consumer device. */
        cb_buff->channels = iio_channel_get_all(dev);
        if (IS_ERR(cb_buff->channels)) {
                ret = PTR_ERR(cb_buff->channels);
                goto error_free_cb_buff;
        }

        indio_dev = cb_buff->channels[0].indio_dev;
        cb_buff->buffer.scan_mask
                = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
                          GFP_KERNEL);
        if (cb_buff->buffer.scan_mask == NULL) {
                ret = -ENOMEM;
                goto error_release_channels;
        }

        /*
         * All channels must belong to a single IIO device; build the scan
         * mask from their scan indices. The channel array is terminated by
         * an entry with a NULL indio_dev.
         */
        chan = &cb_buff->channels[0];
        while (chan->indio_dev) {
                if (chan->indio_dev != indio_dev) {
                        ret = -EINVAL;
                        goto error_free_scan_mask;
                }
                set_bit(chan->channel->scan_index,
                        cb_buff->buffer.scan_mask);
                chan++;
        }

        return cb_buff;

error_free_scan_mask:
        kfree(cb_buff->buffer.scan_mask);
error_release_channels:
        iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
        kfree(cb_buff);
error_ret:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
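
/*
 * Illustrative sketch, not part of the original file: a consumer could wire
 * a callback up like this. example_cb and example_setup are hypothetical
 * names; only the iio_channel_get_all_cb() call above is real API.
 */
static int example_cb(u8 *data, void *private)
{
        /* Called once per scan; "data" holds all requested channels. */
        pr_info("iio scan received\n");
        return 0;
}

static struct iio_cb_buffer *example_setup(struct device *dev)
{
        /* Builds the channel map and buffer; capture has not started yet. */
        return iio_channel_get_all_cb(dev, example_cb, NULL);
}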

/* Attach the callback buffer to the device and start pushing scans. */
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
        return iio_update_buffers(cb_buff->channels[0].indio_dev,
                                  &cb_buff->buffer,
                                  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

/* Detach the callback buffer, stopping delivery to the callback. */
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_update_buffers(cb_buff->channels[0].indio_dev,
                           NULL,
                           &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_channel_release_all(cb_buff->channels);
        kfree(cb_buff->buffer.scan_mask);
        kfree(cb_buff);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
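
/*
 * Illustrative sketch (example_session is a hypothetical helper): the usual
 * lifecycle around the buffer returned by iio_channel_get_all_cb().
 */
static int example_session(struct iio_cb_buffer *cb_buff)
{
        int ret;

        /* Start pushing scans to the callback. */
        ret = iio_channel_start_all_cb(cb_buff);
        if (ret)
                return ret;

        /* ... the callback fires for every captured scan here ... */

        iio_channel_stop_all_cb(cb_buff);       /* stop delivery */
        iio_channel_release_all_cb(cb_buff);    /* free channels and buffer */
        return 0;
}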

struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
        return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
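
/*
 * Illustrative sketch (example_dump_channels is a hypothetical name):
 * consumers can query the channel map to interpret the raw scans handed
 * to the callback, e.g. by scan index.
 */
static void example_dump_channels(const struct iio_cb_buffer *cb_buff)
{
        struct iio_channel *chan = iio_channel_cb_get_channels(cb_buff);

        /* The array is terminated by an entry with a NULL indio_dev. */
        for (; chan->indio_dev; chan++)
                pr_info("channel scan_index %d\n",
                        chan->channel->scan_index);
}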