/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>
#include <linux/export.h>

/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible realtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks whether this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @head:	RCU callback head used for deferred freeing
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
	struct rcu_head head;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and is private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);

/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is a RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which voltage has to be returned
 *
 * Return voltage in micro volts corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(opp_get_voltage);

/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned
 *
 * Return frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(opp_get_freq);

/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
EXPORT_SYMBOL_GPL(opp_get_opp_count);

/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Searches for exact match in the opp list and returns pointer to the matching
 * opp if found, else returns ERR_PTR in case of error and should be handled
 * using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(opp_find_freq_exact);

/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(opp_find_freq_ceil);

/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(opp_find_freq_floor);
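
/*
 * Usage sketch (illustrative only, not part of the library): a hypothetical
 * consumer that honours the locking rules documented above keeps the lookup
 * and the opp_get_{voltage,freq} calls inside one RCU read-side critical
 * section and stops using the opp pointer before rcu_read_unlock():
 *
 *	static int example_pick_opp(struct device *dev, unsigned long target_hz,
 *				    unsigned long *volt_uv, unsigned long *rate_hz)
 *	{
 *		struct opp *opp;
 *		unsigned long freq = target_hz;
 *
 *		rcu_read_lock();
 *		opp = opp_find_freq_ceil(dev, &freq);
 *		if (IS_ERR(opp))
 *			opp = opp_find_freq_floor(dev, &freq);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		*volt_uv = opp_get_voltage(opp);
 *		*rate_hz = opp_get_freq(opp);
 *		rcu_read_unlock();
 *
 *		return 0;
 *	}
 */
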
/**
 * opp_add() - Add a new OPP entry for a device
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device needs to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}
EXPORT_SYMBOL_GPL(opp_add);
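
/*
 * Illustrative sketch (hypothetical SoC init code, not part of the library):
 * the SoC framework registers its OPPs once, early at boot, from sleepable
 * context; entries end up sorted by frequency regardless of the order they
 * are added in. The frequencies and voltages below are made-up examples:
 *
 *	static int __init example_soc_register_opps(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = opp_add(dev, 600000000, 1050000);
 *		if (ret)
 *			return ret;
 *		ret = opp_add(dev, 800000000, 1200000);
 *		if (ret)
 *			return ret;
 *		return opp_add(dev, 1000000000, 1350000);
 *	}
 */
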
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	kfree_rcu(opp, head);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}

/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(opp_enable);

/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(opp_disable);
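
/*
 * Illustrative sketch (hypothetical thermal-throttling user, not part of the
 * library): a driver may park its highest OPP while the device runs hot and
 * restore it later. Both calls must come from sleepable, non-RCU context;
 * the 1 GHz frequency below is a made-up example value:
 *
 *	static void example_thermal_limit(struct device *dev, bool too_hot)
 *	{
 *		unsigned long turbo_hz = 1000000000;
 *
 *		if (too_hot)
 *			opp_disable(dev, turbo_hz);
 *		else
 *			opp_enable(dev, turbo_hz);
 *	}
 */
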
#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory available for the operation (table is not populated), returns 0
 * if successful and table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the OPP modifying functions (opp_add, opp_enable,
 * opp_disable) have been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are an updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].driver_data = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);

/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table
 */
void opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
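
/*
 * Illustrative sketch (hypothetical cpufreq driver glue, not part of the
 * library): build the table from the registered OPPs at init time, hand it
 * to the cpufreq core (here via cpufreq_frequency_table_cpuinfo(), one common
 * option), and free it again at exit since the caller owns the allocation:
 *
 *	static struct cpufreq_frequency_table *example_freq_table;
 *
 *	static int example_cpufreq_init(struct device *cpu_dev,
 *					struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = opp_init_cpufreq_table(cpu_dev, &example_freq_table);
 *		if (ret)
 *			return ret;
 *		return cpufreq_frequency_table_cpuinfo(policy, example_freq_table);
 *	}
 *
 *	static void example_cpufreq_exit(struct device *cpu_dev)
 *	{
 *		opp_free_cpufreq_table(cpu_dev, &example_freq_table);
 *	}
 */
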
#endif		/* CONFIG_CPU_FREQ */

/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}
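
/*
 * Illustrative sketch (hypothetical listener, not part of the library): a
 * driver interested in availability changes registers a notifier block on
 * the head returned here; the chain is invoked with OPP_EVENT_ADD,
 * OPP_EVENT_ENABLE or OPP_EVENT_DISABLE and the affected struct opp as data:
 *
 *	static int example_opp_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		pr_debug("OPP event %lu received\n", event);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_opp_nb = {
 *		.notifier_call = example_opp_event,
 *	};
 *
 *	static int example_register(struct device *dev)
 *	{
 *		struct srcu_notifier_head *nh = opp_get_notifier(dev);
 *
 *		if (IS_ERR(nh))
 *			return PTR_ERR(nh);
 *		return srcu_notifier_chain_register(nh, &example_opp_nb);
 *	}
 */
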
#ifdef CONFIG_OF
/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (opp_add(dev, freq, volt))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		/*
		 * Account for the consumed tuple even if opp_add() failed,
		 * otherwise a bad entry would leave 'nr' unchanged and the
		 * loop would never terminate.
		 */
		nr -= 2;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
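
/*
 * Illustrative device tree fragment (hypothetical node and values, not part
 * of this file): "operating-points" is a flat list of <frequency-kHz
 * voltage-uV> pairs, which a driver then registers from its probe path:
 *
 *	cpu0: cpu@0 {
 *		operating-points = <
 *			792000 1100000
 *			396000  950000
 *			198000  850000
 *		>;
 *	};
 *
 *	// in the hypothetical driver probe:
 *	ret = of_init_opp_table(cpu_dev);
 *	if (ret)
 *		dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
 */
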
#endif