/* drivers/devfreq/devfreq.c */
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12. #include <linux/kernel.h>
  13. #include <linux/sched.h>
  14. #include <linux/errno.h>
  15. #include <linux/err.h>
  16. #include <linux/init.h>
  17. #include <linux/module.h>
  18. #include <linux/slab.h>
  19. #include <linux/stat.h>
  20. #include <linux/opp.h>
  21. #include <linux/devfreq.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/list.h>
  25. #include <linux/printk.h>
  26. #include <linux/hrtimer.h>
  27. #include "governor.h"
/* sysfs class for all devfreq devices; created in devfreq_init() */
struct class *devfreq_class;

/*
 * devfreq_work periodically monitors every registered device.
 * The minimum polling interval is one jiffy. The polling interval is
 * determined by the minimum polling period among all polling devfreq
 * devices. The resolution of polling interval is one jiffy.
 */
/*
 * True while devfreq_work is (or is about to be) queued on devfreq_wq.
 * NOTE(review): usually read/written under devfreq_list_lock, but
 * devfreq_monitor() updates it after dropping the lock — confirm this
 * window is benign before relying on it.
 */
static bool polling;
/* freezable workqueue running devfreq_work; set up in devfreq_start_polling() */
static struct workqueue_struct *devfreq_wq;
static struct delayed_work devfreq_work;

/*
 * The list entry that devfreq_monitor() will visit next; wait removing
 * if this is the device to be removed (see devfreq_remove_device()).
 */
static struct devfreq *wait_remove_device;

/* The list of all device-devfreq (guarded by devfreq_list_lock) */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
  43. /**
  44. * find_device_devfreq() - find devfreq struct using device pointer
  45. * @dev: device pointer used to lookup device devfreq.
  46. *
  47. * Search the list of device devfreqs and return the matched device's
  48. * devfreq info. devfreq_list_lock should be held by the caller.
  49. */
  50. static struct devfreq *find_device_devfreq(struct device *dev)
  51. {
  52. struct devfreq *tmp_devfreq;
  53. if (unlikely(IS_ERR_OR_NULL(dev))) {
  54. pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
  55. return ERR_PTR(-EINVAL);
  56. }
  57. WARN(!mutex_is_locked(&devfreq_list_lock),
  58. "devfreq_list_lock must be locked.");
  59. list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
  60. if (tmp_devfreq->dev.parent == dev)
  61. return tmp_devfreq;
  62. }
  63. return ERR_PTR(-ENODEV);
  64. }
  65. /**
  66. * update_devfreq() - Reevaluate the device and configure frequency.
  67. * @devfreq: the devfreq instance.
  68. *
  69. * Note: Lock devfreq->lock before calling update_devfreq
  70. * This function is exported for governors.
  71. */
  72. int update_devfreq(struct devfreq *devfreq)
  73. {
  74. unsigned long freq;
  75. int err = 0;
  76. u32 flags = 0;
  77. if (!mutex_is_locked(&devfreq->lock)) {
  78. WARN(true, "devfreq->lock must be locked by the caller.\n");
  79. return -EINVAL;
  80. }
  81. /* Reevaluate the proper frequency */
  82. err = devfreq->governor->get_target_freq(devfreq, &freq);
  83. if (err)
  84. return err;
  85. /*
  86. * Adjust the freuqency with user freq and QoS.
  87. *
  88. * List from the highest proiority
  89. * max_freq (probably called by thermal when it's too hot)
  90. * min_freq
  91. */
  92. if (devfreq->min_freq && freq < devfreq->min_freq) {
  93. freq = devfreq->min_freq;
  94. flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
  95. }
  96. if (devfreq->max_freq && freq > devfreq->max_freq) {
  97. freq = devfreq->max_freq;
  98. flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
  99. }
  100. err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
  101. if (err)
  102. return err;
  103. devfreq->previous_freq = freq;
  104. return err;
  105. }
  106. /**
  107. * devfreq_notifier_call() - Notify that the device frequency requirements
  108. * has been changed out of devfreq framework.
  109. * @nb the notifier_block (supposed to be devfreq->nb)
  110. * @type not used
  111. * @devp not used
  112. *
  113. * Called by a notifier that uses devfreq->nb.
  114. */
  115. static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
  116. void *devp)
  117. {
  118. struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
  119. int ret;
  120. mutex_lock(&devfreq->lock);
  121. ret = update_devfreq(devfreq);
  122. mutex_unlock(&devfreq->lock);
  123. return ret;
  124. }
/**
 * _remove_devfreq() - Remove devfreq from the device.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * Note that the caller should lock devfreq->lock before calling
 * this. _remove_devfreq() will unlock it and free devfreq
 * internally. devfreq_list_lock should be locked by the caller
 * as well (not released at return)
 *
 * Lock usage:
 * devfreq->lock: locked before call.
 *	unlocked at return (and freed)
 * devfreq_list_lock: locked before call.
 *	kept locked at return.
 *	if devfreq is centrally polled.
 *
 * Freed memory:
 * devfreq
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return;
	}
	/* Centrally-polled devices live on devfreq_list; the list lock is
	 * required only for them. */
	if (!devfreq->governor->no_central_polling &&
	    !mutex_is_locked(&devfreq_list_lock)) {
		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
		return;
	}

	/* being_removed is never cleared, so removal is idempotent:
	 * a second caller (e.g. devfreq_dev_release) bails out here. */
	if (devfreq->being_removed)
		return;

	devfreq->being_removed = true;

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (devfreq->governor->exit)
		devfreq->governor->exit(devfreq);

	/* @skip is true when called from devfreq_dev_release(), i.e. the
	 * device is already being unregistered. */
	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	if (!devfreq->governor->no_central_polling)
		list_del(&devfreq->node);

	mutex_unlock(&devfreq->lock);
	mutex_destroy(&devfreq->lock);

	kfree(devfreq);
}
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	bool central_polling = !devfreq->governor->no_central_polling;

	/*
	 * If devfreq_dev_release() was called by device_unregister() of
	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
	 * being_removed is already set. This also partially checks the case
	 * where devfreq_dev_release() is called from a thread other than
	 * the one called _remove_devfreq(); however, this case is
	 * dealt completely with another following being_removed check.
	 *
	 * Because being_removed is never being
	 * unset, we do not need to worry about race conditions on
	 * being_removed.
	 */
	if (devfreq->being_removed)
		return;

	/* Lock order: devfreq_list_lock before devfreq->lock, matching
	 * devfreq_monitor() and devfreq_remove_device(). */
	if (central_polling)
		mutex_lock(&devfreq_list_lock);

	mutex_lock(&devfreq->lock);

	/*
	 * Check being_removed flag again for the case where
	 * devfreq_dev_release() was called in a thread other than the one
	 * possibly called _remove_devfreq().
	 */
	if (devfreq->being_removed) {
		mutex_unlock(&devfreq->lock);
		goto out;
	}

	/* devfreq->lock is unlocked and removed in _removed_devfreq() */
	_remove_devfreq(devfreq, true);

out:
	if (central_polling)
		mutex_unlock(&devfreq_list_lock);
}
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 * Walks every centrally-polled devfreq device, reevaluates those whose
 * polling deadline has passed, and requeues itself for the nearest
 * upcoming deadline (if any remain).
 */
static void devfreq_monitor(struct work_struct *work)
{
	/* last_polled_at persists across invocations to measure the real
	 * elapsed time between polls (the queue may run late). */
	static unsigned long last_polled_at;
	struct devfreq *devfreq, *tmp;
	int error;
	unsigned long jiffies_passed;
	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
	struct device *dev;

	/* Initially last_polled_at = 0, polling every device at bootup */
	jiffies_passed = now - last_polled_at;
	last_polled_at = now;
	if (jiffies_passed == 0)
		jiffies_passed = 1;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
		mutex_lock(&devfreq->lock);
		dev = devfreq->dev.parent;

		/* Do not remove tmp for a while: devfreq_remove_device()
		 * spins on wait_remove_device so our saved next pointer
		 * stays valid while we drop devfreq_list_lock below. */
		wait_remove_device = tmp;

		if (devfreq->governor->no_central_polling ||
		    devfreq->next_polling == 0) {
			mutex_unlock(&devfreq->lock);
			continue;
		}
		mutex_unlock(&devfreq_list_lock);

		/*
		 * Reduce more next_polling if devfreq_wq took an extra
		 * delay. (i.e., CPU has been idled.)
		 */
		if (devfreq->next_polling <= jiffies_passed) {
			error = update_devfreq(devfreq);

			/* Remove a devfreq with an error. */
			if (error && error != -EAGAIN) {
				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
					error, devfreq->governor->name);

				/*
				 * Unlock devfreq before locking the list
				 * in order to avoid deadlock with
				 * find_device_devfreq or others
				 */
				mutex_unlock(&devfreq->lock);
				mutex_lock(&devfreq_list_lock);
				/* Check if devfreq is already removed */
				if (IS_ERR(find_device_devfreq(dev)))
					continue;
				mutex_lock(&devfreq->lock);
				/* This unlocks devfreq->lock and free it */
				_remove_devfreq(devfreq, false);
				continue;
			}
			devfreq->next_polling = devfreq->polling_jiffies;
		} else {
			devfreq->next_polling -= jiffies_passed;
		}

		/* Track the soonest deadline for requeueing below. */
		if (devfreq->next_polling)
			next_jiffies = (next_jiffies > devfreq->next_polling) ?
				       devfreq->next_polling : next_jiffies;

		mutex_unlock(&devfreq->lock);
		mutex_lock(&devfreq_list_lock);
	}
	wait_remove_device = NULL;
	mutex_unlock(&devfreq_list_lock);

	/* next_jiffies == ULONG_MAX means nothing left to poll. */
	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
	} else {
		polling = false;
	}
}
  291. /**
  292. * devfreq_add_device() - Add devfreq feature to the device
  293. * @dev: the device to add devfreq feature.
  294. * @profile: device-specific profile to run devfreq.
  295. * @governor: the policy to choose frequency.
  296. * @data: private data for the governor. The devfreq framework does not
  297. * touch this value.
  298. */
  299. struct devfreq *devfreq_add_device(struct device *dev,
  300. struct devfreq_dev_profile *profile,
  301. const struct devfreq_governor *governor,
  302. void *data)
  303. {
  304. struct devfreq *devfreq;
  305. int err = 0;
  306. if (!dev || !profile || !governor) {
  307. dev_err(dev, "%s: Invalid parameters.\n", __func__);
  308. return ERR_PTR(-EINVAL);
  309. }
  310. if (!governor->no_central_polling) {
  311. mutex_lock(&devfreq_list_lock);
  312. devfreq = find_device_devfreq(dev);
  313. mutex_unlock(&devfreq_list_lock);
  314. if (!IS_ERR(devfreq)) {
  315. dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
  316. err = -EINVAL;
  317. goto err_out;
  318. }
  319. }
  320. devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
  321. if (!devfreq) {
  322. dev_err(dev, "%s: Unable to create devfreq for the device\n",
  323. __func__);
  324. err = -ENOMEM;
  325. goto err_out;
  326. }
  327. mutex_init(&devfreq->lock);
  328. mutex_lock(&devfreq->lock);
  329. devfreq->dev.parent = dev;
  330. devfreq->dev.class = devfreq_class;
  331. devfreq->dev.release = devfreq_dev_release;
  332. devfreq->profile = profile;
  333. devfreq->governor = governor;
  334. devfreq->previous_freq = profile->initial_freq;
  335. devfreq->data = data;
  336. devfreq->next_polling = devfreq->polling_jiffies
  337. = msecs_to_jiffies(devfreq->profile->polling_ms);
  338. devfreq->nb.notifier_call = devfreq_notifier_call;
  339. dev_set_name(&devfreq->dev, dev_name(dev));
  340. err = device_register(&devfreq->dev);
  341. if (err) {
  342. put_device(&devfreq->dev);
  343. goto err_dev;
  344. }
  345. if (governor->init)
  346. err = governor->init(devfreq);
  347. if (err)
  348. goto err_init;
  349. mutex_unlock(&devfreq->lock);
  350. if (governor->no_central_polling)
  351. goto out;
  352. mutex_lock(&devfreq_list_lock);
  353. list_add(&devfreq->node, &devfreq_list);
  354. if (devfreq_wq && devfreq->next_polling && !polling) {
  355. polling = true;
  356. queue_delayed_work(devfreq_wq, &devfreq_work,
  357. devfreq->next_polling);
  358. }
  359. mutex_unlock(&devfreq_list_lock);
  360. out:
  361. return devfreq;
  362. err_init:
  363. device_unregister(&devfreq->dev);
  364. err_dev:
  365. mutex_unlock(&devfreq->lock);
  366. kfree(devfreq);
  367. err_out:
  368. return ERR_PTR(err);
  369. }
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * Returns 0 on success, -EINVAL for a NULL @devfreq.
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	bool central_polling;

	if (!devfreq)
		return -EINVAL;

	central_polling = !devfreq->governor->no_central_polling;

	if (central_polling) {
		mutex_lock(&devfreq_list_lock);
		/* devfreq_monitor() pins its next list entry in
		 * wait_remove_device; yield until it has moved past
		 * this device so the list walk stays safe. */
		while (wait_remove_device == devfreq) {
			mutex_unlock(&devfreq_list_lock);
			schedule();
			mutex_lock(&devfreq_list_lock);
		}
	}

	mutex_lock(&devfreq->lock);
	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */

	if (central_polling)
		mutex_unlock(&devfreq_list_lock);

	return 0;
}
  394. static ssize_t show_governor(struct device *dev,
  395. struct device_attribute *attr, char *buf)
  396. {
  397. return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
  398. }
  399. static ssize_t show_freq(struct device *dev,
  400. struct device_attribute *attr, char *buf)
  401. {
  402. return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
  403. }
  404. static ssize_t show_polling_interval(struct device *dev,
  405. struct device_attribute *attr, char *buf)
  406. {
  407. return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
  408. }
  409. static ssize_t store_polling_interval(struct device *dev,
  410. struct device_attribute *attr,
  411. const char *buf, size_t count)
  412. {
  413. struct devfreq *df = to_devfreq(dev);
  414. unsigned int value;
  415. int ret;
  416. ret = sscanf(buf, "%u", &value);
  417. if (ret != 1)
  418. goto out;
  419. mutex_lock(&df->lock);
  420. df->profile->polling_ms = value;
  421. df->next_polling = df->polling_jiffies
  422. = msecs_to_jiffies(value);
  423. mutex_unlock(&df->lock);
  424. ret = count;
  425. if (df->governor->no_central_polling)
  426. goto out;
  427. mutex_lock(&devfreq_list_lock);
  428. if (df->next_polling > 0 && !polling) {
  429. polling = true;
  430. queue_delayed_work(devfreq_wq, &devfreq_work,
  431. df->next_polling);
  432. }
  433. mutex_unlock(&devfreq_list_lock);
  434. out:
  435. return ret;
  436. }
  437. static ssize_t show_central_polling(struct device *dev,
  438. struct device_attribute *attr, char *buf)
  439. {
  440. return sprintf(buf, "%d\n",
  441. !to_devfreq(dev)->governor->no_central_polling);
  442. }
  443. static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
  444. const char *buf, size_t count)
  445. {
  446. struct devfreq *df = to_devfreq(dev);
  447. unsigned long value;
  448. int ret;
  449. unsigned long max;
  450. ret = sscanf(buf, "%lu", &value);
  451. if (ret != 1)
  452. goto out;
  453. mutex_lock(&df->lock);
  454. max = df->max_freq;
  455. if (value && max && value > max) {
  456. ret = -EINVAL;
  457. goto unlock;
  458. }
  459. df->min_freq = value;
  460. update_devfreq(df);
  461. ret = count;
  462. unlock:
  463. mutex_unlock(&df->lock);
  464. out:
  465. return ret;
  466. }
  467. static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
  468. char *buf)
  469. {
  470. return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
  471. }
  472. static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
  473. const char *buf, size_t count)
  474. {
  475. struct devfreq *df = to_devfreq(dev);
  476. unsigned long value;
  477. int ret;
  478. unsigned long min;
  479. ret = sscanf(buf, "%lu", &value);
  480. if (ret != 1)
  481. goto out;
  482. mutex_lock(&df->lock);
  483. min = df->min_freq;
  484. if (value && min && value < min) {
  485. ret = -EINVAL;
  486. goto unlock;
  487. }
  488. df->max_freq = value;
  489. update_devfreq(df);
  490. ret = count;
  491. unlock:
  492. mutex_unlock(&df->lock);
  493. out:
  494. return ret;
  495. }
  496. static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
  497. char *buf)
  498. {
  499. return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
  500. }
/* Default sysfs attributes attached to every devfreq class device
 * (installed via devfreq_class->dev_attrs in devfreq_init()). */
static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	{ },
};
  511. /**
  512. * devfreq_start_polling() - Initialize data structure for devfreq framework and
  513. * start polling registered devfreq devices.
  514. */
  515. static int __init devfreq_start_polling(void)
  516. {
  517. mutex_lock(&devfreq_list_lock);
  518. polling = false;
  519. devfreq_wq = create_freezable_workqueue("devfreq_wq");
  520. INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
  521. mutex_unlock(&devfreq_list_lock);
  522. devfreq_monitor(&devfreq_work.work);
  523. return 0;
  524. }
  525. late_initcall(devfreq_start_polling);
/* Create the devfreq sysfs class and install its default attributes. */
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}
	devfreq_class->dev_attrs = devfreq_attrs;
	return 0;
}
subsys_initcall(devfreq_init);
/* Tear down the devfreq sysfs class on module unload. */
static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
}
module_exit(devfreq_exit);
/*
 * The following are helper functions for devfreq user device drivers with
 * the OPP framework.
 */
  546. /**
  547. * devfreq_recommended_opp() - Helper function to get proper OPP for the
  548. * freq value given to target callback.
  549. * @dev The devfreq user device. (parent of devfreq)
  550. * @freq The frequency given to target function
  551. * @flags Flags handed from devfreq framework.
  552. *
  553. */
  554. struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
  555. u32 flags)
  556. {
  557. struct opp *opp;
  558. if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
  559. /* The freq is an upper bound. opp should be lower */
  560. opp = opp_find_freq_floor(dev, freq);
  561. /* If not available, use the closest opp */
  562. if (opp == ERR_PTR(-ENODEV))
  563. opp = opp_find_freq_ceil(dev, freq);
  564. } else {
  565. /* The freq is an lower bound. opp should be higher */
  566. opp = opp_find_freq_ceil(dev, freq);
  567. /* If not available, use the closest opp */
  568. if (opp == ERR_PTR(-ENODEV))
  569. opp = opp_find_freq_floor(dev, freq);
  570. }
  571. return opp;
  572. }
  573. /**
  574. * devfreq_register_opp_notifier() - Helper function to get devfreq notified
  575. * for any changes in the OPP availability
  576. * changes
  577. * @dev The devfreq user device. (parent of devfreq)
  578. * @devfreq The devfreq object.
  579. */
  580. int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
  581. {
  582. struct srcu_notifier_head *nh = opp_get_notifier(dev);
  583. if (IS_ERR(nh))
  584. return PTR_ERR(nh);
  585. return srcu_notifier_chain_register(nh, &devfreq->nb);
  586. }
  587. /**
  588. * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
  589. * notified for any changes in the OPP
  590. * availability changes anymore.
  591. * @dev The devfreq user device. (parent of devfreq)
  592. * @devfreq The devfreq object.
  593. *
  594. * At exit() callback of devfreq_dev_profile, this must be included if
  595. * devfreq_recommended_opp is used.
  596. */
  597. int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
  598. {
  599. struct srcu_notifier_head *nh = opp_get_notifier(dev);
  600. if (IS_ERR(nh))
  601. return PTR_ERR(nh);
  602. return srcu_notifier_chain_unregister(nh, &devfreq->nb);
  603. }
/* Module metadata */
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");