inkern.c

/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
        struct iio_dev *indio_dev;
        struct iio_map *map;
        struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
        int i = 0, ret = 0;
        struct iio_map_internal *mapi;

        if (maps == NULL)
                return 0;

        mutex_lock(&iio_map_list_lock);
        while (maps[i].consumer_dev_name != NULL) {
                mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
                if (mapi == NULL) {
                        ret = -ENOMEM;
                        goto error_ret;
                }
                mapi->map = &maps[i];
                mapi->indio_dev = indio_dev;
                list_add(&mapi->l, &iio_map_list);
                i++;
        }
error_ret:
        mutex_unlock(&iio_map_list_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
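/*
 * Usage sketch (illustrative only, not part of this file): a board file or
 * provider driver could register a map array so that a consumer device named
 * "hypothetical-consumer" can request the channel "vbatt". All names below
 * are assumptions for the example; the empty sentinel entry terminates the
 * array in place of an explicit count.
 *
 *      static struct iio_map board_adc_maps[] = {
 *              {
 *                      .consumer_dev_name = "hypothetical-consumer",
 *                      .consumer_channel = "vbatt",
 *                      .adc_channel_label = "CH0",
 *              },
 *              {},
 *      };
 *
 *      ret = iio_map_array_register(indio_dev, board_adc_maps);
 *
 * The matching iio_map_array_unregister(indio_dev) belongs in the provider's
 * remove path.
 */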
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
        int ret = -ENODEV;
        struct iio_map_internal *mapi;
        struct list_head *pos, *tmp;

        mutex_lock(&iio_map_list_lock);
        list_for_each_safe(pos, tmp, &iio_map_list) {
                mapi = list_entry(pos, struct iio_map_internal, l);
                if (indio_dev == mapi->indio_dev) {
                        list_del(&mapi->l);
                        kfree(mapi);
                        ret = 0;
                }
        }
        mutex_unlock(&iio_map_list_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
        int i;
        const struct iio_chan_spec *chan = NULL;

        for (i = 0; i < indio_dev->num_channels; i++)
                if (indio_dev->channels[i].datasheet_name &&
                    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
                        chan = &indio_dev->channels[i];
                        break;
                }
        return chan;
}
#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, void *data)
{
        return dev->of_node == data && dev->type == &iio_device_type;
}

static int __of_iio_channel_get(struct iio_channel *channel,
                                struct device_node *np, int index)
{
        struct device *idev;
        struct iio_dev *indio_dev;
        int err;
        struct of_phandle_args iiospec;

        err = of_parse_phandle_with_args(np, "io-channels",
                                         "#io-channel-cells",
                                         index, &iiospec);
        if (err)
                return err;

        idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
                               iio_dev_node_match);
        of_node_put(iiospec.np);
        if (idev == NULL)
                return -EPROBE_DEFER;

        indio_dev = dev_to_iio_dev(idev);
        channel->indio_dev = indio_dev;
        index = iiospec.args_count ? iiospec.args[0] : 0;
        if (index >= indio_dev->num_channels) {
                err = -EINVAL;
                goto err_put;
        }
        channel->channel = &indio_dev->channels[index];

        return 0;

err_put:
        iio_device_put(indio_dev);
        return err;
}
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
        struct iio_channel *channel;
        int err;

        if (index < 0)
                return ERR_PTR(-EINVAL);

        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
        if (channel == NULL)
                return ERR_PTR(-ENOMEM);

        err = __of_iio_channel_get(channel, np, index);
        if (err)
                goto err_free_channel;

        return channel;

err_free_channel:
        kfree(channel);
        return ERR_PTR(err);
}
static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                                                      const char *name)
{
        struct iio_channel *chan = NULL;

        /* Walk up the tree of devices looking for a matching iio channel */
        while (np) {
                int index = 0;

                /*
                 * For named iio channels, first look up the name in the
                 * "io-channel-names" property. If it cannot be found, the
                 * index will be an error code, and of_iio_channel_get()
                 * will fail.
                 */
                if (name)
                        index = of_property_match_string(np, "io-channel-names",
                                                         name);
                chan = of_iio_channel_get(np, index);
                if (!IS_ERR(chan))
                        break;
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
                               np->full_name, name ? name : "", index);
                        return chan;
                }

                /*
                 * No matching IIO channel found on this node.
                 * If the parent node has an "io-channel-ranges" property,
                 * then we can try one of its channels.
                 */
                np = np->parent;
                if (np && !of_get_property(np, "io-channel-ranges", NULL))
                        break;
        }
        return chan;
}
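/*
 * Device-tree sketch of what the lookup above expects; all node names and
 * values here are hypothetical. A consumer references providers through
 * "io-channels" (with #io-channel-cells selecting the argument count) and
 * may label each entry through "io-channel-names":
 *
 *      adc: adc@48 {
 *              #io-channel-cells = <1>;
 *      };
 *
 *      consumer {
 *              io-channels = <&adc 0>, <&adc 1>;
 *              io-channel-names = "vbatt", "temp";
 *      };
 *
 * of_iio_channel_get_by_name(np, "temp") would resolve to channel 1 of the
 * adc node; if nothing matches on np itself, the walk continues to parents
 * that carry "io-channel-ranges".
 */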
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
        struct iio_channel *chans;
        int i, mapind, nummaps = 0;
        int ret;

        do {
                ret = of_parse_phandle_with_args(dev->of_node,
                                                 "io-channels",
                                                 "#io-channel-cells",
                                                 nummaps, NULL);
                if (ret < 0)
                        break;
        } while (++nummaps);

        if (nummaps == 0)       /* no error, return NULL to search map table */
                return NULL;

        /* NULL terminated array to save passing size */
        chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
        if (chans == NULL)
                return ERR_PTR(-ENOMEM);

        /* Search for OF matches */
        for (mapind = 0; mapind < nummaps; mapind++) {
                ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
                                           mapind);
                if (ret)
                        goto error_free_chans;
        }
        return chans;

error_free_chans:
        for (i = 0; i < mapind; i++)
                iio_device_put(chans[i].indio_dev);
        kfree(chans);
        return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
        return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
        return NULL;
}

#endif /* CONFIG_OF */
static struct iio_channel *iio_channel_get_sys(const char *name,
                                               const char *channel_name)
{
        struct iio_map_internal *c_i = NULL, *c = NULL;
        struct iio_channel *channel;
        int err;

        if (name == NULL && channel_name == NULL)
                return ERR_PTR(-ENODEV);

        /* first find matching entry in the channel map */
        mutex_lock(&iio_map_list_lock);
        list_for_each_entry(c_i, &iio_map_list, l) {
                if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
                    (channel_name &&
                     strcmp(channel_name, c_i->map->consumer_channel) != 0))
                        continue;
                c = c_i;
                iio_device_get(c->indio_dev);
                break;
        }
        mutex_unlock(&iio_map_list_lock);
        if (c == NULL)
                return ERR_PTR(-ENODEV);

        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
        if (channel == NULL) {
                err = -ENOMEM;
                goto error_no_mem;
        }

        channel->indio_dev = c->indio_dev;

        if (c->map->adc_channel_label) {
                channel->channel =
                        iio_chan_spec_from_name(channel->indio_dev,
                                                c->map->adc_channel_label);
                if (channel->channel == NULL) {
                        err = -EINVAL;
                        goto error_no_chan;
                }
        }

        return channel;

error_no_chan:
        kfree(channel);
error_no_mem:
        iio_device_put(c->indio_dev);
        return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
                                    const char *channel_name)
{
        const char *name = dev ? dev_name(dev) : NULL;
        struct iio_channel *channel;

        if (dev) {
                channel = of_iio_channel_get_by_name(dev->of_node,
                                                     channel_name);
                if (channel != NULL)
                        return channel;
        }
        return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
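/*
 * Consumer usage sketch (illustrative, error handling abbreviated):
 *
 *      struct iio_channel *chan = iio_channel_get(&pdev->dev, "vbatt");
 *
 *      if (IS_ERR(chan))
 *              return PTR_ERR(chan);
 *      ...
 *      iio_channel_release(chan);
 *
 * "vbatt" must match either an "io-channel-names" entry in the device tree
 * or the consumer_channel of a registered struct iio_map.
 */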
void iio_channel_release(struct iio_channel *channel)
{
        iio_device_put(channel->indio_dev);
        kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
        const char *name;
        struct iio_channel *chans;
        struct iio_map_internal *c = NULL;
        int nummaps = 0;
        int mapind = 0;
        int i, ret;

        if (dev == NULL)
                return ERR_PTR(-EINVAL);

        chans = of_iio_channel_get_all(dev);
        if (chans)
                return chans;

        name = dev_name(dev);

        mutex_lock(&iio_map_list_lock);
        /* first count the matching maps */
        list_for_each_entry(c, &iio_map_list, l)
                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
                        continue;
                else
                        nummaps++;

        if (nummaps == 0) {
                ret = -ENODEV;
                goto error_ret;
        }

        /* NULL terminated array to save passing size */
        chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
        if (chans == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* for each map fill in the chans element */
        list_for_each_entry(c, &iio_map_list, l) {
                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
                        continue;
                chans[mapind].indio_dev = c->indio_dev;
                chans[mapind].data = c->map->consumer_data;
                chans[mapind].channel =
                        iio_chan_spec_from_name(chans[mapind].indio_dev,
                                                c->map->adc_channel_label);
                if (chans[mapind].channel == NULL) {
                        ret = -EINVAL;
                        goto error_free_chans;
                }
                iio_device_get(chans[mapind].indio_dev);
                mapind++;
        }
        if (mapind == 0) {
                ret = -ENODEV;
                goto error_free_chans;
        }
        mutex_unlock(&iio_map_list_lock);

        return chans;

error_free_chans:
        /* only drop the references actually taken above (0..mapind-1) */
        for (i = 0; i < mapind; i++)
                iio_device_put(chans[i].indio_dev);
        kfree(chans);
error_ret:
        mutex_unlock(&iio_map_list_lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
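/*
 * The array returned above is NULL terminated (an entry whose indio_dev is
 * NULL marks the end), so a consumer can iterate without being passed a
 * count. Sketch, with use_channel() a hypothetical helper:
 *
 *      struct iio_channel *chans = iio_channel_get_all(dev);
 *      struct iio_channel *chan;
 *
 *      if (IS_ERR(chans))
 *              return PTR_ERR(chans);
 *      for (chan = chans; chan->indio_dev; chan++)
 *              use_channel(chan);
 *      iio_channel_release_all(chans);
 */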
void iio_channel_release_all(struct iio_channel *channels)
{
        struct iio_channel *chan = &channels[0];

        while (chan->indio_dev) {
                iio_device_put(chan->indio_dev);
                chan++;
        }
        kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
                            enum iio_chan_info_enum info)
{
        int unused;

        if (val2 == NULL)
                val2 = &unused;

        return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
                                               val, val2, info);
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
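/*
 * Sketch of a raw read from a previously acquired channel:
 *
 *      int raw;
 *      int ret = iio_read_channel_raw(chan, &raw);
 *
 *      if (ret < 0)
 *              return ret;
 *
 * The result is in device units; apply the channel's offset and scale (or
 * use iio_read_channel_processed() below) to obtain a physical quantity.
 */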
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
        int raw, int *processed, unsigned int scale)
{
        int scale_type, scale_val, scale_val2, offset;
        s64 raw64 = raw;
        int ret;

        ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
        if (ret >= 0)
                raw64 += offset;

        scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
                                      IIO_CHAN_INFO_SCALE);
        if (scale_type < 0)
                return scale_type;

        /* the consumer's scale multiplier applies to the integer part too */
        switch (scale_type) {
        case IIO_VAL_INT:
                *processed = raw64 * scale_val * scale;
                break;
        case IIO_VAL_INT_PLUS_MICRO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val * scale;
                else
                        *processed = raw64 * scale_val * scale;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000LL);
                break;
        case IIO_VAL_INT_PLUS_NANO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val * scale;
                else
                        *processed = raw64 * scale_val * scale;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000000LL);
                break;
        case IIO_VAL_FRACTIONAL:
                *processed = div_s64(raw64 * (s64)scale_val * scale,
                                     scale_val2);
                break;
        case IIO_VAL_FRACTIONAL_LOG2:
                *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
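/*
 * Worked example of the conversion above, with assumed values: a channel
 * reporting IIO_VAL_INT_PLUS_MICRO with scale_val = 1 and scale_val2 =
 * 500000 (a scale of 1.5), raw = 100 and offset = 0:
 *
 *      scale = 1:    processed = 100 * 1 * 1 + (100 * 500000 * 1) / 10^6
 *                              = 100 + 50 = 150
 *      scale = 1000: processed = 100000 + 50000 = 150000
 *
 * i.e. the scale argument lets the caller request the result in finer units
 * (here thousandths) without losing the fractional part.
 */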
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
        int *processed, unsigned int scale)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
                                                    scale);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
                ret = iio_channel_read(chan, val, NULL,
                                       IIO_CHAN_INFO_PROCESSED);
        } else {
                ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
                if (ret < 0)
                        goto err_unlock;
                ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
        }

err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
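/*
 * Sketch of a processed read; per the IIO ABI the unit follows the channel
 * type (e.g. millivolts for voltage channels):
 *
 *      int mv;
 *      int ret = iio_read_channel_processed(chan, &mv);
 *
 * When the driver does not expose IIO_CHAN_INFO_PROCESSED, the raw value is
 * read and converted with the channel's offset and scale as above.
 */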
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
        int ret = 0;

        /* Need to verify underlying driver has not gone away */
        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        *type = chan->channel->type;
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);