/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  13. #include <linux/kernel.h>
  14. #include <linux/errno.h>
  15. #include <linux/err.h>
  16. #include <linux/init.h>
  17. #include <linux/slab.h>
  18. #include <linux/cpufreq.h>
  19. #include <linux/device.h>
  20. #include <linux/list.h>
  21. #include <linux/rculist.h>
  22. #include <linux/rcupdate.h>
  23. #include <linux/opp.h>
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible realtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order (opp_add() relies on this to insert in sorted position,
 *		and opp_find_freq_floor() relies on it to break out early).
 * @available:	true/false - marks if this OPP as available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
};
/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by
 *		dev_opp_list_lock
 * @dev:	device pointer (used as the lookup key by find_device_opp())
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps, kept sorted by increasing frequency
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared to users as it is
 * meant for book keeping and private to OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 *
 * Readers traverse it under rcu_read_lock(); writers serialize on
 * dev_opp_list_lock below.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
  92. /**
  93. * find_device_opp() - find device_opp struct using device pointer
  94. * @dev: device pointer used to lookup device OPPs
  95. *
  96. * Search list of device OPPs for one containing matching device. Does a RCU
  97. * reader operation to grab the pointer needed.
  98. *
  99. * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
  100. * -EINVAL based on type of error.
  101. *
  102. * Locking: This function must be called under rcu_read_lock(). device_opp
  103. * is a RCU protected pointer. This means that device_opp is valid as long
  104. * as we are under RCU lock.
  105. */
  106. static struct device_opp *find_device_opp(struct device *dev)
  107. {
  108. struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
  109. if (unlikely(IS_ERR_OR_NULL(dev))) {
  110. pr_err("%s: Invalid parameters\n", __func__);
  111. return ERR_PTR(-EINVAL);
  112. }
  113. list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
  114. if (tmp_dev_opp->dev == dev) {
  115. dev_opp = tmp_dev_opp;
  116. break;
  117. }
  118. }
  119. return dev_opp;
  120. }
  121. /**
  122. * opp_get_voltage() - Gets the voltage corresponding to an available opp
  123. * @opp: opp for which voltage has to be returned for
  124. *
  125. * Return voltage in micro volt corresponding to the opp, else
  126. * return 0
  127. *
  128. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  129. * protected pointer. This means that opp which could have been fetched by
  130. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  131. * under RCU lock. The pointer returned by the opp_find_freq family must be
  132. * used in the same section as the usage of this function with the pointer
  133. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  134. * pointer.
  135. */
  136. unsigned long opp_get_voltage(struct opp *opp)
  137. {
  138. struct opp *tmp_opp;
  139. unsigned long v = 0;
  140. tmp_opp = rcu_dereference(opp);
  141. if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
  142. pr_err("%s: Invalid parameters\n", __func__);
  143. else
  144. v = tmp_opp->u_volt;
  145. return v;
  146. }
  147. /**
  148. * opp_get_freq() - Gets the frequency corresponding to an available opp
  149. * @opp: opp for which frequency has to be returned for
  150. *
  151. * Return frequency in hertz corresponding to the opp, else
  152. * return 0
  153. *
  154. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  155. * protected pointer. This means that opp which could have been fetched by
  156. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  157. * under RCU lock. The pointer returned by the opp_find_freq family must be
  158. * used in the same section as the usage of this function with the pointer
  159. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  160. * pointer.
  161. */
  162. unsigned long opp_get_freq(struct opp *opp)
  163. {
  164. struct opp *tmp_opp;
  165. unsigned long f = 0;
  166. tmp_opp = rcu_dereference(opp);
  167. if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
  168. pr_err("%s: Invalid parameters\n", __func__);
  169. else
  170. f = tmp_opp->rate;
  171. return f;
  172. }
  173. /**
  174. * opp_get_opp_count() - Get number of opps available in the opp list
  175. * @dev: device for which we do this operation
  176. *
  177. * This function returns the number of available opps if there are any,
  178. * else returns 0 if none or the corresponding error value.
  179. *
  180. * Locking: This function must be called under rcu_read_lock(). This function
  181. * internally references two RCU protected structures: device_opp and opp which
  182. * are safe as long as we are under a common RCU locked section.
  183. */
  184. int opp_get_opp_count(struct device *dev)
  185. {
  186. struct device_opp *dev_opp;
  187. struct opp *temp_opp;
  188. int count = 0;
  189. dev_opp = find_device_opp(dev);
  190. if (IS_ERR(dev_opp)) {
  191. int r = PTR_ERR(dev_opp);
  192. dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
  193. return r;
  194. }
  195. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  196. if (temp_opp->available)
  197. count++;
  198. }
  199. return count;
  200. }
  201. /**
  202. * opp_find_freq_exact() - search for an exact frequency
  203. * @dev: device for which we do this operation
  204. * @freq: frequency to search for
  205. * @available: true/false - match for available opp
  206. *
  207. * Searches for exact match in the opp list and returns pointer to the matching
  208. * opp if found, else returns ERR_PTR in case of error and should be handled
  209. * using IS_ERR.
  210. *
  211. * Note: available is a modifier for the search. if available=true, then the
  212. * match is for exact matching frequency and is available in the stored OPP
  213. * table. if false, the match is for exact frequency which is not available.
  214. *
  215. * This provides a mechanism to enable an opp which is not available currently
  216. * or the opposite as well.
  217. *
  218. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  219. * protected pointer. The reason for the same is that the opp pointer which is
  220. * returned will remain valid for use with opp_get_{voltage, freq} only while
  221. * under the locked area. The pointer returned must be used prior to unlocking
  222. * with rcu_read_unlock() to maintain the integrity of the pointer.
  223. */
  224. struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
  225. bool available)
  226. {
  227. struct device_opp *dev_opp;
  228. struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
  229. dev_opp = find_device_opp(dev);
  230. if (IS_ERR(dev_opp)) {
  231. int r = PTR_ERR(dev_opp);
  232. dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
  233. return ERR_PTR(r);
  234. }
  235. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  236. if (temp_opp->available == available &&
  237. temp_opp->rate == freq) {
  238. opp = temp_opp;
  239. break;
  240. }
  241. }
  242. return opp;
  243. }
  244. /**
  245. * opp_find_freq_ceil() - Search for an rounded ceil freq
  246. * @dev: device for which we do this operation
  247. * @freq: Start frequency
  248. *
  249. * Search for the matching ceil *available* OPP from a starting freq
  250. * for a device.
  251. *
  252. * Returns matching *opp and refreshes *freq accordingly, else returns
  253. * ERR_PTR in case of error and should be handled using IS_ERR.
  254. *
  255. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  256. * protected pointer. The reason for the same is that the opp pointer which is
  257. * returned will remain valid for use with opp_get_{voltage, freq} only while
  258. * under the locked area. The pointer returned must be used prior to unlocking
  259. * with rcu_read_unlock() to maintain the integrity of the pointer.
  260. */
  261. struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
  262. {
  263. struct device_opp *dev_opp;
  264. struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
  265. if (!dev || !freq) {
  266. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  267. return ERR_PTR(-EINVAL);
  268. }
  269. dev_opp = find_device_opp(dev);
  270. if (IS_ERR(dev_opp))
  271. return opp;
  272. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  273. if (temp_opp->available && temp_opp->rate >= *freq) {
  274. opp = temp_opp;
  275. *freq = opp->rate;
  276. break;
  277. }
  278. }
  279. return opp;
  280. }
  281. /**
  282. * opp_find_freq_floor() - Search for a rounded floor freq
  283. * @dev: device for which we do this operation
  284. * @freq: Start frequency
  285. *
  286. * Search for the matching floor *available* OPP from a starting freq
  287. * for a device.
  288. *
  289. * Returns matching *opp and refreshes *freq accordingly, else returns
  290. * ERR_PTR in case of error and should be handled using IS_ERR.
  291. *
  292. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  293. * protected pointer. The reason for the same is that the opp pointer which is
  294. * returned will remain valid for use with opp_get_{voltage, freq} only while
  295. * under the locked area. The pointer returned must be used prior to unlocking
  296. * with rcu_read_unlock() to maintain the integrity of the pointer.
  297. */
  298. struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
  299. {
  300. struct device_opp *dev_opp;
  301. struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
  302. if (!dev || !freq) {
  303. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  304. return ERR_PTR(-EINVAL);
  305. }
  306. dev_opp = find_device_opp(dev);
  307. if (IS_ERR(dev_opp))
  308. return opp;
  309. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  310. if (temp_opp->available) {
  311. /* go to the next node, before choosing prev */
  312. if (temp_opp->rate > *freq)
  313. break;
  314. else
  315. opp = temp_opp;
  316. }
  317. }
  318. if (!IS_ERR(opp))
  319. *freq = opp->rate;
  320. return opp;
  321. }
/**
 * opp_add() - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * opp_enable/disable functions.
 *
 * Returns 0 on success, -ENOMEM if either allocation fails.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node (outside the lock: GFP_KERNEL may sleep) */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device is needed to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table before publishing the node to readers */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/*
	 * Insert new OPP in order of increasing frequency: 'head' tracks the
	 * last node with rate <= freq, so list_add_rcu() below places the new
	 * node right after it, keeping the list sorted.
	 */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -ENODEV for a missing device or frequency, -ENOMEM if no memory
 * available for the copy operation, returns 0 if no modification was done OR
 * modification was successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated (before taking the lock; may sleep) */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp (plain iteration: we hold the updater mutex) */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? (r stays 0: no-op is success) */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* RCU update: swap the node, then wait out readers of the old one */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	synchronize_rcu();

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	/* clean up old opp: reuse new_opp so the single kfree below frees it */
	new_opp = opp;
	goto out;

unlock:
	mutex_unlock(&dev_opp_list_lock);
out:
	/* frees the old node on success, the unused allocation on early exit */
	kfree(new_opp);
	return r;
}
  477. /**
  478. * opp_enable() - Enable a specific OPP
  479. * @dev: device for which we do this operation
  480. * @freq: OPP frequency to enable
  481. *
  482. * Enables a provided opp. If the operation is valid, this returns 0, else the
  483. * corresponding error value. It is meant to be used for users an OPP available
  484. * after being temporarily made unavailable with opp_disable.
  485. *
  486. * Locking: The internal device_opp and opp structures are RCU protected.
  487. * Hence this function indirectly uses RCU and mutex locks to keep the
  488. * integrity of the internal data structures. Callers should ensure that
  489. * this function is *NOT* called under RCU protection or in contexts where
  490. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  491. */
  492. int opp_enable(struct device *dev, unsigned long freq)
  493. {
  494. return opp_set_availability(dev, freq, true);
  495. }
  496. /**
  497. * opp_disable() - Disable a specific OPP
  498. * @dev: device for which we do this operation
  499. * @freq: OPP frequency to disable
  500. *
  501. * Disables a provided opp. If the operation is valid, this returns
  502. * 0, else the corresponding error value. It is meant to be a temporary
  503. * control by users to make this OPP not available until the circumstances are
  504. * right to make it available again (with a call to opp_enable).
  505. *
  506. * Locking: The internal device_opp and opp structures are RCU protected.
  507. * Hence this function indirectly uses RCU and mutex locks to keep the
  508. * integrity of the internal data structures. Callers should ensure that
  509. * this function is *NOT* called under RCU protection or in contexts where
  510. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  511. */
  512. int opp_disable(struct device *dev, unsigned long freq)
  513. {
  514. return opp_set_availability(dev, freq, false);
  515. }
  516. #ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device- this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required (see opp_free_cpufreq_table).
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory available for the operation (table is not populated), returns 0
 * if successful and table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the mentioned functions have been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are updater and hold relevant mutex here
 * Callers should ensure that this function is *NOT* called under RCU protection
 * or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	/* +1 entry reserved for the CPUFREQ_TABLE_END terminator below */
	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	/* cpufreq tables are in kHz while OPP rates are in Hz */
	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	/* terminate the table */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
  578. /**
  579. * opp_free_cpufreq_table() - free the cpufreq table
  580. * @dev: device for which we do this operation
  581. * @table: table to free
  582. *
  583. * Free up the table allocated by opp_init_cpufreq_table
  584. */
  585. void opp_free_cpufreq_table(struct device *dev,
  586. struct cpufreq_frequency_table **table)
  587. {
  588. if (!table)
  589. return;
  590. kfree(*table);
  591. *table = NULL;
  592. }
  593. #endif /* CONFIG_CPU_FREQ */
  594. /**
  595. * opp_get_notifier() - find notifier_head of the device with opp
  596. * @dev: device pointer used to lookup device OPPs.
  597. */
  598. struct srcu_notifier_head *opp_get_notifier(struct device *dev)
  599. {
  600. struct device_opp *dev_opp = find_device_opp(dev);
  601. if (IS_ERR(dev_opp))
  602. return ERR_CAST(dev_opp); /* matching type */
  603. return &dev_opp->head;
  604. }