cpufreq.c

  1. /*
  2. * linux/drivers/cpufreq/cpufreq.c
  3. *
  4. * Copyright (C) 2001 Russell King
  5. * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  6. *
  7. * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
  8. * Added handling for CPU hotplug
  9. * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
  10. * Fix handling for CPU hotplug -- affected CPUs
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License version 2 as
  14. * published by the Free Software Foundation.
  15. *
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/init.h>
  20. #include <linux/notifier.h>
  21. #include <linux/cpufreq.h>
  22. #include <linux/delay.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/device.h>
  26. #include <linux/slab.h>
  27. #include <linux/cpu.h>
  28. #include <linux/completion.h>
  29. #include <linux/mutex.h>
  30. #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
  31. "cpufreq-core", msg)
  32. /**
  33. * The "cpufreq driver" - the arch- or hardware-dependent low
  34. * level driver of CPUFreq support, and its spinlock. This lock
  35. * also protects the cpufreq_cpu_data array.
  36. */
  37. static struct cpufreq_driver *cpufreq_driver;
  38. static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
  39. #ifdef CONFIG_HOTPLUG_CPU
  40. /* This one keeps track of the previously set governor of a removed CPU */
  41. static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
  42. #endif
  43. static DEFINE_SPINLOCK(cpufreq_driver_lock);
  44. /*
  45. * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
  46. * all cpufreq/hotplug/workqueue/etc related lock issues.
  47. *
  48. * The rules for this semaphore:
  49. * - Any routine that wants to read from the policy structure will
  50. * do a down_read on this semaphore.
  51. * - Any routine that will write to the policy structure and/or may take away
  52. * the policy altogether (eg. CPU hotplug), will hold this lock in write
  53. * mode before doing so.
  54. *
  55. * Additional rules:
  56. * - All holders of the lock should check to make sure that the CPU they
  57. * are concerned with is online after they get the lock.
  58. * - Governor routines that can be called in the cpufreq hotplug path should
  59. * not take this semaphore, as the top-level hotplug notifier handler takes it.
  60. */
  61. static DEFINE_PER_CPU(int, policy_cpu);
  62. static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
  63. #define lock_policy_rwsem(mode, cpu) \
  64. int lock_policy_rwsem_##mode \
  65. (int cpu) \
  66. { \
  67. int policy_cpu = per_cpu(policy_cpu, cpu); \
  68. BUG_ON(policy_cpu == -1); \
  69. down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
  70. if (unlikely(!cpu_online(cpu))) { \
  71. up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
  72. return -1; \
  73. } \
  74. \
  75. return 0; \
  76. }
  77. lock_policy_rwsem(read, cpu);
  78. EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
  79. lock_policy_rwsem(write, cpu);
  80. EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
  81. void unlock_policy_rwsem_read(int cpu)
  82. {
  83. int policy_cpu = per_cpu(policy_cpu, cpu);
  84. BUG_ON(policy_cpu == -1);
  85. up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
  86. }
  87. EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
  88. void unlock_policy_rwsem_write(int cpu)
  89. {
  90. int policy_cpu = per_cpu(policy_cpu, cpu);
  91. BUG_ON(policy_cpu == -1);
  92. up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
  93. }
  94. EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
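/*
 * Illustrative usage of the per-CPU policy rwsem (a minimal sketch): readers
 * take the lock in read mode and bail out if the CPU went offline while they
 * were waiting for it.
 *
 *	if (unlikely(lock_policy_rwsem_read(cpu)))
 *		return -EINVAL;		/* CPU went away under us */
 *	... read fields of the policy ...
 *	unlock_policy_rwsem_read(cpu);
 *
 * The sysfs show()/store() handlers and cpufreq_get() below follow this
 * pattern.
 */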
  95. /* internal prototypes */
  96. static int __cpufreq_governor(struct cpufreq_policy *policy,
  97. unsigned int event);
  98. static unsigned int __cpufreq_get(unsigned int cpu);
  99. static void handle_update(struct work_struct *work);
  100. /**
  101. * Two notifier lists: the "policy" list is involved in the
  102. * validation process for a new CPU frequency policy; the
  103. * "transition" list for kernel code that needs to handle
  104. * changes to devices when the CPU clock speed changes.
  105. * The mutex locks both lists.
  106. */
  107. static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
  108. static struct srcu_notifier_head cpufreq_transition_notifier_list;
  109. static bool init_cpufreq_transition_notifier_list_called;
  110. static int __init init_cpufreq_transition_notifier_list(void)
  111. {
  112. srcu_init_notifier_head(&cpufreq_transition_notifier_list);
  113. init_cpufreq_transition_notifier_list_called = true;
  114. return 0;
  115. }
  116. pure_initcall(init_cpufreq_transition_notifier_list);
  117. static LIST_HEAD(cpufreq_governor_list);
  118. static DEFINE_MUTEX(cpufreq_governor_mutex);
  119. struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
  120. {
  121. struct cpufreq_policy *data;
  122. unsigned long flags;
  123. if (cpu >= nr_cpu_ids)
  124. goto err_out;
  125. /* get the cpufreq driver */
  126. spin_lock_irqsave(&cpufreq_driver_lock, flags);
  127. if (!cpufreq_driver)
  128. goto err_out_unlock;
  129. if (!try_module_get(cpufreq_driver->owner))
  130. goto err_out_unlock;
  131. /* get the CPU */
  132. data = per_cpu(cpufreq_cpu_data, cpu);
  133. if (!data)
  134. goto err_out_put_module;
  135. if (!kobject_get(&data->kobj))
  136. goto err_out_put_module;
  137. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  138. return data;
  139. err_out_put_module:
  140. module_put(cpufreq_driver->owner);
  141. err_out_unlock:
  142. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  143. err_out:
  144. return NULL;
  145. }
  146. EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
  147. void cpufreq_cpu_put(struct cpufreq_policy *data)
  148. {
  149. kobject_put(&data->kobj);
  150. module_put(cpufreq_driver->owner);
  151. }
  152. EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
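/*
 * Typical get/put pairing (a minimal, illustrative sketch): a successful
 * cpufreq_cpu_get() takes a reference on both the policy kobject and the
 * cpufreq driver module, so every caller must balance it with
 * cpufreq_cpu_put():
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		... use policy ...
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * cpufreq_quick_get() below is an in-tree example of this pattern.
 */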
  153. /*********************************************************************
  154. * UNIFIED DEBUG HELPERS *
  155. *********************************************************************/
  156. #ifdef CONFIG_CPU_FREQ_DEBUG
  157. /* what part(s) of the CPUfreq subsystem are debugged? */
  158. static unsigned int debug;
  159. /* is the debug output ratelimit'ed using printk_ratelimit? User can
  160. * set or modify this value.
  161. */
  162. static unsigned int debug_ratelimit = 1;
  163. /* is the printk_ratelimit'ing enabled? It's enabled after a successful
  164. * loading of a cpufreq driver, temporarily disabled when a new policy
  165. * is set, and disabled upon cpufreq driver removal
  166. */
  167. static unsigned int disable_ratelimit = 1;
  168. static DEFINE_SPINLOCK(disable_ratelimit_lock);
  169. static void cpufreq_debug_enable_ratelimit(void)
  170. {
  171. unsigned long flags;
  172. spin_lock_irqsave(&disable_ratelimit_lock, flags);
  173. if (disable_ratelimit)
  174. disable_ratelimit--;
  175. spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
  176. }
  177. static void cpufreq_debug_disable_ratelimit(void)
  178. {
  179. unsigned long flags;
  180. spin_lock_irqsave(&disable_ratelimit_lock, flags);
  181. disable_ratelimit++;
  182. spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
  183. }
  184. void cpufreq_debug_printk(unsigned int type, const char *prefix,
  185. const char *fmt, ...)
  186. {
  187. char s[256];
  188. va_list args;
  189. unsigned int len;
  190. unsigned long flags;
  191. WARN_ON(!prefix);
  192. if (type & debug) {
  193. spin_lock_irqsave(&disable_ratelimit_lock, flags);
  194. if (!disable_ratelimit && debug_ratelimit
  195. && !printk_ratelimit()) {
  196. spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
  197. return;
  198. }
  199. spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
  200. len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
  201. va_start(args, fmt);
  202. len += vsnprintf(&s[len], (256 - len), fmt, args);
  203. va_end(args);
  204. printk(s);
  205. WARN_ON(len < 5);
  206. }
  207. }
  208. EXPORT_SYMBOL(cpufreq_debug_printk);
  209. module_param(debug, uint, 0644);
  210. MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
  211. " 2 to debug drivers, and 4 to debug governors.");
  212. module_param(debug_ratelimit, uint, 0644);
  213. MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
  214. " set to 0 to disable ratelimiting.");
  215. #else /* !CONFIG_CPU_FREQ_DEBUG */
  216. static inline void cpufreq_debug_enable_ratelimit(void) { return; }
  217. static inline void cpufreq_debug_disable_ratelimit(void) { return; }
  218. #endif /* CONFIG_CPU_FREQ_DEBUG */
  219. /*********************************************************************
  220. * EXTERNALLY AFFECTING FREQUENCY CHANGES *
  221. *********************************************************************/
  222. /**
  223. * adjust_jiffies - adjust the system "loops_per_jiffy"
  224. *
  225. * This function alters the system "loops_per_jiffy" for the clock
  226. * speed change. Note that loops_per_jiffy cannot be updated on SMP
  227. * systems as each CPU might be scaled differently. So, use the arch
  228. * per-CPU loops_per_jiffy value wherever possible.
  229. */
  230. #ifndef CONFIG_SMP
  231. static unsigned long l_p_j_ref;
  232. static unsigned int l_p_j_ref_freq;
  233. static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
  234. {
  235. if (ci->flags & CPUFREQ_CONST_LOOPS)
  236. return;
  237. if (!l_p_j_ref_freq) {
  238. l_p_j_ref = loops_per_jiffy;
  239. l_p_j_ref_freq = ci->old;
  240. dprintk("saving %lu as reference value for loops_per_jiffy; "
  241. "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
  242. }
  243. if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
  244. (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
  245. (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
  246. loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
  247. ci->new);
  248. dprintk("scaling loops_per_jiffy to %lu "
  249. "for frequency %u kHz\n", loops_per_jiffy, ci->new);
  250. }
  251. }
  252. #else
  253. static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
  254. {
  255. return;
  256. }
  257. #endif
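/*
 * For reference (assuming cpufreq_scale() computes old * mult / div, as
 * defined in <linux/cpufreq.h>), the adjustment above boils down to
 *
 *	loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq;
 *
 * i.e. loops_per_jiffy is scaled in proportion to the frequency change
 * relative to the first reference frequency that was recorded.
 */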
  258. /**
  259. * cpufreq_notify_transition - call notifier chain and adjust_jiffies
  260. * on frequency transition.
  261. *
  262. * This function calls the transition notifiers and the "adjust_jiffies"
  263. * function. It is called twice on all CPU frequency changes that have
  264. * external effects.
  265. */
  266. void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
  267. {
  268. struct cpufreq_policy *policy;
  269. BUG_ON(irqs_disabled());
  270. freqs->flags = cpufreq_driver->flags;
  271. dprintk("notification %u of frequency transition to %u kHz\n",
  272. state, freqs->new);
  273. policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
  274. switch (state) {
  275. case CPUFREQ_PRECHANGE:
  276. /* detect if the driver reported a value as "old frequency"
  277. * which is not equal to what the cpufreq core thinks is
  278. * "old frequency".
  279. */
  280. if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
  281. if ((policy) && (policy->cpu == freqs->cpu) &&
  282. (policy->cur) && (policy->cur != freqs->old)) {
  283. dprintk("Warning: CPU frequency is"
  284. " %u, cpufreq assumed %u kHz.\n",
  285. freqs->old, policy->cur);
  286. freqs->old = policy->cur;
  287. }
  288. }
  289. srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
  290. CPUFREQ_PRECHANGE, freqs);
  291. adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
  292. break;
  293. case CPUFREQ_POSTCHANGE:
  294. adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
  295. srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
  296. CPUFREQ_POSTCHANGE, freqs);
  297. if (likely(policy) && likely(policy->cpu == freqs->cpu))
  298. policy->cur = freqs->new;
  299. break;
  300. }
  301. }
  302. EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
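/*
 * Illustrative calling sequence for scaling drivers (a sketch; the local
 * variable names are hypothetical):
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.cpu = policy->cpu;
 *	freqs.old = current_khz;
 *	freqs.new = target_khz;
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	... program the hardware to the new frequency ...
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 *
 * The PRECHANGE call lets notifiers and adjust_jiffies() prepare for the
 * change; the POSTCHANGE call publishes the new frequency as policy->cur.
 */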
  303. /*********************************************************************
  304. * SYSFS INTERFACE *
  305. *********************************************************************/
  306. static struct cpufreq_governor *__find_governor(const char *str_governor)
  307. {
  308. struct cpufreq_governor *t;
  309. list_for_each_entry(t, &cpufreq_governor_list, governor_list)
  310. if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
  311. return t;
  312. return NULL;
  313. }
  314. /**
  315. * cpufreq_parse_governor - parse a governor string
  316. */
  317. static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
  318. struct cpufreq_governor **governor)
  319. {
  320. int err = -EINVAL;
  321. if (!cpufreq_driver)
  322. goto out;
  323. if (cpufreq_driver->setpolicy) {
  324. if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
  325. *policy = CPUFREQ_POLICY_PERFORMANCE;
  326. err = 0;
  327. } else if (!strnicmp(str_governor, "powersave",
  328. CPUFREQ_NAME_LEN)) {
  329. *policy = CPUFREQ_POLICY_POWERSAVE;
  330. err = 0;
  331. }
  332. } else if (cpufreq_driver->target) {
  333. struct cpufreq_governor *t;
  334. mutex_lock(&cpufreq_governor_mutex);
  335. t = __find_governor(str_governor);
  336. if (t == NULL) {
  337. char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
  338. str_governor);
  339. if (name) {
  340. int ret;
  341. mutex_unlock(&cpufreq_governor_mutex);
  342. ret = request_module("%s", name);
  343. mutex_lock(&cpufreq_governor_mutex);
  344. if (ret == 0)
  345. t = __find_governor(str_governor);
  346. }
  347. kfree(name);
  348. }
  349. if (t != NULL) {
  350. *governor = t;
  351. err = 0;
  352. }
  353. mutex_unlock(&cpufreq_governor_mutex);
  354. }
  355. out:
  356. return err;
  357. }
  358. /**
  359. * cpufreq_per_cpu_attr_read() / show_##file_name() -
  360. * print out cpufreq information
  361. *
  362. * Write out information from cpufreq_driver->policy[cpu]; object must be
  363. * "unsigned int".
  364. */
  365. #define show_one(file_name, object) \
  366. static ssize_t show_##file_name \
  367. (struct cpufreq_policy *policy, char *buf) \
  368. { \
  369. return sprintf(buf, "%u\n", policy->object); \
  370. }
  371. show_one(cpuinfo_min_freq, cpuinfo.min_freq);
  372. show_one(cpuinfo_max_freq, cpuinfo.max_freq);
  373. show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
  374. show_one(scaling_min_freq, min);
  375. show_one(scaling_max_freq, max);
  376. show_one(scaling_cur_freq, cur);
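/*
 * For example, show_one(scaling_cur_freq, cur) above expands to:
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */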
  377. static int __cpufreq_set_policy(struct cpufreq_policy *data,
  378. struct cpufreq_policy *policy);
  379. /**
  380. * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  381. */
  382. #define store_one(file_name, object) \
  383. static ssize_t store_##file_name \
  384. (struct cpufreq_policy *policy, const char *buf, size_t count) \
  385. { \
  386. int ret = -EINVAL; \
  387. struct cpufreq_policy new_policy; \
  388. \
  389. ret = cpufreq_get_policy(&new_policy, policy->cpu); \
  390. if (ret) \
  391. return -EINVAL; \
  392. \
  393. ret = sscanf(buf, "%u", &new_policy.object); \
  394. if (ret != 1) \
  395. return -EINVAL; \
  396. \
  397. ret = __cpufreq_set_policy(policy, &new_policy); \
  398. policy->user_policy.object = policy->object; \
  399. \
  400. return ret ? ret : count; \
  401. }
  402. store_one(scaling_min_freq, min);
  403. store_one(scaling_max_freq, max);
  404. /**
  405. * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
  406. */
  407. static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
  408. char *buf)
  409. {
  410. unsigned int cur_freq = __cpufreq_get(policy->cpu);
  411. if (!cur_freq)
  412. return sprintf(buf, "<unknown>");
  413. return sprintf(buf, "%u\n", cur_freq);
  414. }
  415. /**
  416. * show_scaling_governor - show the current policy for the specified CPU
  417. */
  418. static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
  419. {
  420. if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
  421. return sprintf(buf, "powersave\n");
  422. else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
  423. return sprintf(buf, "performance\n");
  424. else if (policy->governor)
  425. return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
  426. policy->governor->name);
  427. return -EINVAL;
  428. }
  429. /**
  430. * store_scaling_governor - store policy for the specified CPU
  431. */
  432. static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
  433. const char *buf, size_t count)
  434. {
  435. int ret = -EINVAL;
  436. char str_governor[16];
  437. struct cpufreq_policy new_policy;
  438. ret = cpufreq_get_policy(&new_policy, policy->cpu);
  439. if (ret)
  440. return ret;
  441. ret = sscanf(buf, "%15s", str_governor);
  442. if (ret != 1)
  443. return -EINVAL;
  444. if (cpufreq_parse_governor(str_governor, &new_policy.policy,
  445. &new_policy.governor))
  446. return -EINVAL;
  447. /* Do not use cpufreq_set_policy here or the user_policy.max
  448. will be wrongly overridden */
  449. ret = __cpufreq_set_policy(policy, &new_policy);
  450. policy->user_policy.policy = policy->policy;
  451. policy->user_policy.governor = policy->governor;
  452. if (ret)
  453. return ret;
  454. else
  455. return count;
  456. }
  457. /**
  458. * show_scaling_driver - show the cpufreq driver currently loaded
  459. */
  460. static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
  461. {
  462. return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
  463. }
  464. /**
  465. * show_scaling_available_governors - show the available CPUfreq governors
  466. */
  467. static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
  468. char *buf)
  469. {
  470. ssize_t i = 0;
  471. struct cpufreq_governor *t;
  472. if (!cpufreq_driver->target) {
  473. i += sprintf(buf, "performance powersave");
  474. goto out;
  475. }
  476. list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
  477. if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
  478. - (CPUFREQ_NAME_LEN + 2)))
  479. goto out;
  480. i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
  481. }
  482. out:
  483. i += sprintf(&buf[i], "\n");
  484. return i;
  485. }
  486. static ssize_t show_cpus(const struct cpumask *mask, char *buf)
  487. {
  488. ssize_t i = 0;
  489. unsigned int cpu;
  490. for_each_cpu(cpu, mask) {
  491. if (i)
  492. i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
  493. i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
  494. if (i >= (PAGE_SIZE - 5))
  495. break;
  496. }
  497. i += sprintf(&buf[i], "\n");
  498. return i;
  499. }
  500. /**
  501. * show_related_cpus - show the CPUs affected by each transition even if
  502. * hw coordination is in use
  503. */
  504. static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
  505. {
  506. if (cpumask_empty(policy->related_cpus))
  507. return show_cpus(policy->cpus, buf);
  508. return show_cpus(policy->related_cpus, buf);
  509. }
  510. /**
  511. * show_affected_cpus - show the CPUs affected by each transition
  512. */
  513. static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
  514. {
  515. return show_cpus(policy->cpus, buf);
  516. }
  517. static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
  518. const char *buf, size_t count)
  519. {
  520. unsigned int freq = 0;
  521. unsigned int ret;
  522. if (!policy->governor || !policy->governor->store_setspeed)
  523. return -EINVAL;
  524. ret = sscanf(buf, "%u", &freq);
  525. if (ret != 1)
  526. return -EINVAL;
  527. policy->governor->store_setspeed(policy, freq);
  528. return count;
  529. }
  530. static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
  531. {
  532. if (!policy->governor || !policy->governor->show_setspeed)
  533. return sprintf(buf, "<unsupported>\n");
  534. return policy->governor->show_setspeed(policy, buf);
  535. }
  536. #define define_one_ro(_name) \
  537. static struct freq_attr _name = \
  538. __ATTR(_name, 0444, show_##_name, NULL)
  539. #define define_one_ro0400(_name) \
  540. static struct freq_attr _name = \
  541. __ATTR(_name, 0400, show_##_name, NULL)
  542. #define define_one_rw(_name) \
  543. static struct freq_attr _name = \
  544. __ATTR(_name, 0644, show_##_name, store_##_name)
  545. define_one_ro0400(cpuinfo_cur_freq);
  546. define_one_ro(cpuinfo_min_freq);
  547. define_one_ro(cpuinfo_max_freq);
  548. define_one_ro(cpuinfo_transition_latency);
  549. define_one_ro(scaling_available_governors);
  550. define_one_ro(scaling_driver);
  551. define_one_ro(scaling_cur_freq);
  552. define_one_ro(related_cpus);
  553. define_one_ro(affected_cpus);
  554. define_one_rw(scaling_min_freq);
  555. define_one_rw(scaling_max_freq);
  556. define_one_rw(scaling_governor);
  557. define_one_rw(scaling_setspeed);
  558. static struct attribute *default_attrs[] = {
  559. &cpuinfo_min_freq.attr,
  560. &cpuinfo_max_freq.attr,
  561. &cpuinfo_transition_latency.attr,
  562. &scaling_min_freq.attr,
  563. &scaling_max_freq.attr,
  564. &affected_cpus.attr,
  565. &related_cpus.attr,
  566. &scaling_governor.attr,
  567. &scaling_driver.attr,
  568. &scaling_available_governors.attr,
  569. &scaling_setspeed.attr,
  570. NULL
  571. };
  572. #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
  573. #define to_attr(a) container_of(a, struct freq_attr, attr)
  574. static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
  575. {
  576. struct cpufreq_policy *policy = to_policy(kobj);
  577. struct freq_attr *fattr = to_attr(attr);
  578. ssize_t ret = -EINVAL;
  579. policy = cpufreq_cpu_get(policy->cpu);
  580. if (!policy)
  581. goto no_policy;
  582. if (lock_policy_rwsem_read(policy->cpu) < 0)
  583. goto fail;
  584. if (fattr->show)
  585. ret = fattr->show(policy, buf);
  586. else
  587. ret = -EIO;
  588. unlock_policy_rwsem_read(policy->cpu);
  589. fail:
  590. cpufreq_cpu_put(policy);
  591. no_policy:
  592. return ret;
  593. }
  594. static ssize_t store(struct kobject *kobj, struct attribute *attr,
  595. const char *buf, size_t count)
  596. {
  597. struct cpufreq_policy *policy = to_policy(kobj);
  598. struct freq_attr *fattr = to_attr(attr);
  599. ssize_t ret = -EINVAL;
  600. policy = cpufreq_cpu_get(policy->cpu);
  601. if (!policy)
  602. goto no_policy;
  603. if (lock_policy_rwsem_write(policy->cpu) < 0)
  604. goto fail;
  605. if (fattr->store)
  606. ret = fattr->store(policy, buf, count);
  607. else
  608. ret = -EIO;
  609. unlock_policy_rwsem_write(policy->cpu);
  610. fail:
  611. cpufreq_cpu_put(policy);
  612. no_policy:
  613. return ret;
  614. }
  615. static void cpufreq_sysfs_release(struct kobject *kobj)
  616. {
  617. struct cpufreq_policy *policy = to_policy(kobj);
  618. dprintk("last reference is dropped\n");
  619. complete(&policy->kobj_unregister);
  620. }
  621. static struct sysfs_ops sysfs_ops = {
  622. .show = show,
  623. .store = store,
  624. };
  625. static struct kobj_type ktype_cpufreq = {
  626. .sysfs_ops = &sysfs_ops,
  627. .default_attrs = default_attrs,
  628. .release = cpufreq_sysfs_release,
  629. };
  630. /**
  631. * cpufreq_add_dev - add a CPU device
  632. *
  633. * Adds the cpufreq interface for a CPU device.
  634. *
  635. * The Oracle says: try running cpufreq registration/unregistration concurrently
  636. * with cpu hotplugging and all hell will break loose. Tried to clean this
  637. * mess up, but more thorough testing is needed. - Mathieu
  638. */
  639. static int cpufreq_add_dev(struct sys_device *sys_dev)
  640. {
  641. unsigned int cpu = sys_dev->id;
  642. int ret = 0;
  643. struct cpufreq_policy new_policy;
  644. struct cpufreq_policy *policy;
  645. struct freq_attr **drv_attr;
  646. struct sys_device *cpu_sys_dev;
  647. unsigned long flags;
  648. unsigned int j;
  649. if (cpu_is_offline(cpu))
  650. return 0;
  651. cpufreq_debug_disable_ratelimit();
  652. dprintk("adding CPU %u\n", cpu);
  653. #ifdef CONFIG_SMP
  654. /* check whether a different CPU already registered this
  655. * CPU because it is in the same boat. */
  656. policy = cpufreq_cpu_get(cpu);
  657. if (unlikely(policy)) {
  658. cpufreq_cpu_put(policy);
  659. cpufreq_debug_enable_ratelimit();
  660. return 0;
  661. }
  662. #endif
  663. if (!try_module_get(cpufreq_driver->owner)) {
  664. ret = -EINVAL;
  665. goto module_out;
  666. }
  667. policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
  668. if (!policy) {
  669. ret = -ENOMEM;
  670. goto nomem_out;
  671. }
  672. if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
  673. ret = -ENOMEM;
  674. goto err_free_policy;
  675. }
  676. if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
  677. ret = -ENOMEM;
  678. goto err_free_cpumask;
  679. }
  680. policy->cpu = cpu;
  681. cpumask_copy(policy->cpus, cpumask_of(cpu));
  682. /* Initially set CPU itself as the policy_cpu */
  683. per_cpu(policy_cpu, cpu) = cpu;
  684. ret = (lock_policy_rwsem_write(cpu) < 0);
  685. WARN_ON(ret);
  686. init_completion(&policy->kobj_unregister);
  687. INIT_WORK(&policy->update, handle_update);
  688. /* Set governor before ->init, so that driver could check it */
  689. policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
  690. /* call driver. From then on the cpufreq must be able
  691. * to accept all calls to ->verify and ->setpolicy for this CPU
  692. */
  693. ret = cpufreq_driver->init(policy);
  694. if (ret) {
  695. dprintk("initialization failed\n");
  696. goto err_unlock_policy;
  697. }
  698. policy->user_policy.min = policy->min;
  699. policy->user_policy.max = policy->max;
  700. blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
  701. CPUFREQ_START, policy);
  702. #ifdef CONFIG_SMP
  703. #ifdef CONFIG_HOTPLUG_CPU
  704. if (per_cpu(cpufreq_cpu_governor, cpu)) {
  705. policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
  706. dprintk("Restoring governor %s for cpu %d\n",
  707. policy->governor->name, cpu);
  708. }
  709. #endif
  710. for_each_cpu(j, policy->cpus) {
  711. struct cpufreq_policy *managed_policy;
  712. if (cpu == j)
  713. continue;
  714. /* Check for existing affected CPUs.
  715. * They may not be aware of it due to CPU Hotplug.
  716. */
  717. managed_policy = cpufreq_cpu_get(j);
  718. if (unlikely(managed_policy)) {
  719. /* Set proper policy_cpu */
  720. unlock_policy_rwsem_write(cpu);
  721. per_cpu(policy_cpu, cpu) = managed_policy->cpu;
  722. if (lock_policy_rwsem_write(cpu) < 0) {
  723. /* Should not go through policy unlock path */
  724. if (cpufreq_driver->exit)
  725. cpufreq_driver->exit(policy);
  726. ret = -EBUSY;
  727. cpufreq_cpu_put(managed_policy);
  728. goto err_free_cpumask;
  729. }
  730. spin_lock_irqsave(&cpufreq_driver_lock, flags);
  731. cpumask_copy(managed_policy->cpus, policy->cpus);
  732. per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
  733. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  734. dprintk("CPU already managed, adding link\n");
  735. ret = sysfs_create_link(&sys_dev->kobj,
  736. &managed_policy->kobj,
  737. "cpufreq");
  738. if (!ret)
  739. cpufreq_cpu_put(managed_policy);
  740. /*
  741. * Success. We only needed to be added to the mask.
  742. * Call driver->exit() because only the cpu parent of
  743. * the kobj needed to call init().
  744. */
  745. goto out_driver_exit; /* call driver->exit() */
  746. }
  747. }
  748. #endif
  749. memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
  750. /* prepare interface data */
  751. ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
  752. "cpufreq");
  753. if (ret)
  754. goto out_driver_exit;
  755. /* set up files for this cpu device */
  756. drv_attr = cpufreq_driver->attr;
  757. while ((drv_attr) && (*drv_attr)) {
  758. ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
  759. if (ret)
  760. goto err_out_kobj_put;
  761. drv_attr++;
  762. }
  763. if (cpufreq_driver->get) {
  764. ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
  765. if (ret)
  766. goto err_out_kobj_put;
  767. }
  768. if (cpufreq_driver->target) {
  769. ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
  770. if (ret)
  771. goto err_out_kobj_put;
  772. }
  773. spin_lock_irqsave(&cpufreq_driver_lock, flags);
  774. for_each_cpu(j, policy->cpus) {
  775. per_cpu(cpufreq_cpu_data, j) = policy;
  776. per_cpu(policy_cpu, j) = policy->cpu;
  777. }
  778. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  779. /* symlink affected CPUs */
  780. for_each_cpu(j, policy->cpus) {
  781. struct cpufreq_policy *managed_policy;
  782. if (j == cpu)
  783. continue;
  784. if (!cpu_online(j))
  785. continue;
  786. dprintk("CPU %u already managed, adding link\n", j);
  787. managed_policy = cpufreq_cpu_get(cpu);
  788. cpu_sys_dev = get_cpu_sysdev(j);
  789. ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
  790. "cpufreq");
  791. if (ret) {
  792. cpufreq_cpu_put(managed_policy);
  793. goto err_out_unregister;
  794. }
  795. }
  796. policy->governor = NULL; /* to assure that the starting sequence is
  797. * run in cpufreq_set_policy */
  798. /* set default policy */
  799. ret = __cpufreq_set_policy(policy, &new_policy);
  800. policy->user_policy.policy = policy->policy;
  801. policy->user_policy.governor = policy->governor;
  802. if (ret) {
  803. dprintk("setting policy failed\n");
  804. goto err_out_unregister;
  805. }
  806. unlock_policy_rwsem_write(cpu);
  807. kobject_uevent(&policy->kobj, KOBJ_ADD);
  808. module_put(cpufreq_driver->owner);
  809. dprintk("initialization complete\n");
  810. cpufreq_debug_enable_ratelimit();
  811. return 0;
  812. err_out_unregister:
  813. spin_lock_irqsave(&cpufreq_driver_lock, flags);
  814. for_each_cpu(j, policy->cpus)
  815. per_cpu(cpufreq_cpu_data, j) = NULL;
  816. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  817. err_out_kobj_put:
  818. kobject_put(&policy->kobj);
  819. wait_for_completion(&policy->kobj_unregister);
  820. out_driver_exit:
  821. if (cpufreq_driver->exit)
  822. cpufreq_driver->exit(policy);
  823. err_unlock_policy:
  824. unlock_policy_rwsem_write(cpu);
  825. err_free_cpumask:
  826. free_cpumask_var(policy->cpus);
  827. err_free_policy:
  828. kfree(policy);
  829. nomem_out:
  830. module_put(cpufreq_driver->owner);
  831. module_out:
  832. cpufreq_debug_enable_ratelimit();
  833. return ret;
  834. }
  835. /**
  836. * __cpufreq_remove_dev - remove a CPU device
  837. *
  838. * Removes the cpufreq interface for a CPU device.
  839. * Caller should already have policy_rwsem in write mode for this CPU.
  840. * This routine frees the rwsem before returning.
  841. */
  842. static int __cpufreq_remove_dev(struct sys_device *sys_dev)
  843. {
  844. unsigned int cpu = sys_dev->id;
  845. unsigned long flags;
  846. struct cpufreq_policy *data;
  847. #ifdef CONFIG_SMP
  848. struct sys_device *cpu_sys_dev;
  849. unsigned int j;
  850. #endif
  851. cpufreq_debug_disable_ratelimit();
  852. dprintk("unregistering CPU %u\n", cpu);
  853. spin_lock_irqsave(&cpufreq_driver_lock, flags);
  854. data = per_cpu(cpufreq_cpu_data, cpu);
  855. if (!data) {
  856. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  857. cpufreq_debug_enable_ratelimit();
  858. unlock_policy_rwsem_write(cpu);
  859. return -EINVAL;
  860. }
  861. per_cpu(cpufreq_cpu_data, cpu) = NULL;
  862. #ifdef CONFIG_SMP
  863. /* if this isn't the CPU which is the parent of the kobj, we
  864. * only need to unlink, put and exit
  865. */
  866. if (unlikely(cpu != data->cpu)) {
  867. dprintk("removing link\n");
  868. cpumask_clear_cpu(cpu, data->cpus);
  869. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  870. sysfs_remove_link(&sys_dev->kobj, "cpufreq");
  871. cpufreq_cpu_put(data);
  872. cpufreq_debug_enable_ratelimit();
  873. unlock_policy_rwsem_write(cpu);
  874. return 0;
  875. }
  876. #endif
  877. #ifdef CONFIG_SMP
  878. #ifdef CONFIG_HOTPLUG_CPU
  879. per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
  880. #endif
  881. /* if we have other CPUs still registered, we need to unlink them,
  882. * or else wait_for_completion below will lock up. Clean the
  883. * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
  884. * the sysfs links afterwards.
  885. */
  886. if (unlikely(cpumask_weight(data->cpus) > 1)) {
  887. for_each_cpu(j, data->cpus) {
  888. if (j == cpu)
  889. continue;
  890. per_cpu(cpufreq_cpu_data, j) = NULL;
  891. }
  892. }
  893. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  894. if (unlikely(cpumask_weight(data->cpus) > 1)) {
  895. for_each_cpu(j, data->cpus) {
  896. if (j == cpu)
  897. continue;
  898. dprintk("removing link for cpu %u\n", j);
  899. #ifdef CONFIG_HOTPLUG_CPU
  900. per_cpu(cpufreq_cpu_governor, j) = data->governor;
  901. #endif
  902. cpu_sys_dev = get_cpu_sysdev(j);
  903. sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
  904. cpufreq_cpu_put(data);
  905. }
  906. }
  907. #else
  908. spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  909. #endif
  910. if (cpufreq_driver->target)
  911. __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  912. kobject_put(&data->kobj);
  913. /* we need to make sure that the underlying kobj is actually
  914. * not referenced anymore by anybody before we proceed with
  915. * unloading.
  916. */
  917. dprintk("waiting for dropping of refcount\n");
  918. wait_for_completion(&data->kobj_unregister);
  919. dprintk("wait complete\n");
  920. if (cpufreq_driver->exit)
  921. cpufreq_driver->exit(data);
  922. unlock_policy_rwsem_write(cpu);
  923. free_cpumask_var(data->related_cpus);
  924. free_cpumask_var(data->cpus);
  925. kfree(data);
  926. per_cpu(cpufreq_cpu_data, cpu) = NULL;
  927. cpufreq_debug_enable_ratelimit();
  928. return 0;
  929. }
  930. static int cpufreq_remove_dev(struct sys_device *sys_dev)
  931. {
  932. unsigned int cpu = sys_dev->id;
  933. int retval;
  934. if (cpu_is_offline(cpu))
  935. return 0;
  936. if (unlikely(lock_policy_rwsem_write(cpu)))
  937. BUG();
  938. retval = __cpufreq_remove_dev(sys_dev);
  939. return retval;
  940. }
  941. static void handle_update(struct work_struct *work)
  942. {
  943. struct cpufreq_policy *policy =
  944. container_of(work, struct cpufreq_policy, update);
  945. unsigned int cpu = policy->cpu;
  946. dprintk("handle_update for cpu %u called\n", cpu);
  947. cpufreq_update_policy(cpu);
  948. }
  949. /**
  950. * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
  951. * @cpu: cpu number
  952. * @old_freq: CPU frequency the kernel thinks the CPU runs at
  953. * @new_freq: CPU frequency the CPU actually runs at
  954. *
  955. * We adjust to the current frequency first, and need to clean up later by
  956. * either calling cpufreq_update_policy() or scheduling handle_update().
  957. */
  958. static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
  959. unsigned int new_freq)
  960. {
  961. struct cpufreq_freqs freqs;
  962. dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
  963. "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
  964. freqs.cpu = cpu;
  965. freqs.old = old_freq;
  966. freqs.new = new_freq;
  967. cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
  968. cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
  969. }
  970. /**
  971. * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
  972. * @cpu: CPU number
  973. *
  974. * This is the last known freq, without actually getting it from the driver.
  975. * Return value will be same as what is shown in scaling_cur_freq in sysfs.
  976. */
  977. unsigned int cpufreq_quick_get(unsigned int cpu)
  978. {
  979. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  980. unsigned int ret_freq = 0;
  981. if (policy) {
  982. ret_freq = policy->cur;
  983. cpufreq_cpu_put(policy);
  984. }
  985. return ret_freq;
  986. }
  987. EXPORT_SYMBOL(cpufreq_quick_get);
  988. static unsigned int __cpufreq_get(unsigned int cpu)
  989. {
  990. struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
  991. unsigned int ret_freq = 0;
  992. if (!cpufreq_driver->get)
  993. return ret_freq;
  994. ret_freq = cpufreq_driver->get(cpu);
  995. if (ret_freq && policy->cur &&
  996. !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
  997. /* verify no discrepancy between actual and
  998. saved value exists */
  999. if (unlikely(ret_freq != policy->cur)) {
  1000. cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
  1001. schedule_work(&policy->update);
  1002. }
  1003. }
  1004. return ret_freq;
  1005. }
  1006. /**
  1007. * cpufreq_get - get the current CPU frequency (in kHz)
  1008. * @cpu: CPU number
  1009. *
  1010. * Get the current frequency of the CPU, as reported by the hardware driver.
  1011. */
  1012. unsigned int cpufreq_get(unsigned int cpu)
  1013. {
  1014. unsigned int ret_freq = 0;
  1015. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  1016. if (!policy)
  1017. goto out;
  1018. if (unlikely(lock_policy_rwsem_read(cpu)))
  1019. goto out_policy;
  1020. ret_freq = __cpufreq_get(cpu);
  1021. unlock_policy_rwsem_read(cpu);
  1022. out_policy:
  1023. cpufreq_cpu_put(policy);
  1024. out:
  1025. return ret_freq;
  1026. }
  1027. EXPORT_SYMBOL(cpufreq_get);
  1028. /**
  1029. * cpufreq_suspend - let the low level driver prepare for suspend
  1030. */
  1031. static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
  1032. {
  1033. int cpu = sysdev->id;
  1034. int ret = 0;
  1035. unsigned int cur_freq = 0;
  1036. struct cpufreq_policy *cpu_policy;
  1037. dprintk("suspending cpu %u\n", cpu);
  1038. if (!cpu_online(cpu))
  1039. return 0;
  1040. /* we may be lax here as interrupts are off. Nonetheless
  1041. * we need to grab the correct cpu policy, as to check
  1042. * whether we really run on this CPU.
  1043. */
  1044. cpu_policy = cpufreq_cpu_get(cpu);
  1045. if (!cpu_policy)
  1046. return -EINVAL;
  1047. /* only handle each CPU group once */
  1048. if (unlikely(cpu_policy->cpu != cpu))
  1049. goto out;
  1050. if (cpufreq_driver->suspend) {
  1051. ret = cpufreq_driver->suspend(cpu_policy, pmsg);
  1052. if (ret) {
  1053. printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
  1054. "step on CPU %u\n", cpu_policy->cpu);
  1055. goto out;
  1056. }
  1057. }
  1058. if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
  1059. goto out;
  1060. if (cpufreq_driver->get)
  1061. cur_freq = cpufreq_driver->get(cpu_policy->cpu);
  1062. if (!cur_freq || !cpu_policy->cur) {
  1063. printk(KERN_ERR "cpufreq: suspend failed to assert current "
  1064. "frequency is what timing core thinks it is.\n");
  1065. goto out;
  1066. }
  1067. if (unlikely(cur_freq != cpu_policy->cur)) {
  1068. struct cpufreq_freqs freqs;
  1069. if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
  1070. dprintk("Warning: CPU frequency is %u, "
  1071. "cpufreq assumed %u kHz.\n",
  1072. cur_freq, cpu_policy->cur);
  1073. freqs.cpu = cpu;
  1074. freqs.old = cpu_policy->cur;
  1075. freqs.new = cur_freq;
  1076. srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
  1077. CPUFREQ_SUSPENDCHANGE, &freqs);
  1078. adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
  1079. cpu_policy->cur = cur_freq;
  1080. }
  1081. out:
  1082. cpufreq_cpu_put(cpu_policy);
  1083. return ret;
  1084. }
  1085. /**
  1086. * cpufreq_resume - restore proper CPU frequency handling after resume
  1087. *
  1088. * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
  1089. * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
  1090. * 3.) schedule a call to cpufreq_update_policy() ASAP once interrupts are
  1091. * restored.
  1092. */
  1093. static int cpufreq_resume(struct sys_device *sysdev)
  1094. {
  1095. int cpu = sysdev->id;
  1096. int ret = 0;
  1097. struct cpufreq_policy *cpu_policy;
  1098. dprintk("resuming cpu %u\n", cpu);
  1099. if (!cpu_online(cpu))
  1100. return 0;
  1101. /* we may be lax here as interrupts are off. Nonetheless
  1102. * we need to grab the correct cpu policy, as to check
  1103. * whether we really run on this CPU.
  1104. */
  1105. cpu_policy = cpufreq_cpu_get(cpu);
  1106. if (!cpu_policy)
  1107. return -EINVAL;
  1108. /* only handle each CPU group once */
  1109. if (unlikely(cpu_policy->cpu != cpu))
  1110. goto fail;
  1111. if (cpufreq_driver->resume) {
  1112. ret = cpufreq_driver->resume(cpu_policy);
  1113. if (ret) {
  1114. printk(KERN_ERR "cpufreq: resume failed in ->resume "
  1115. "step on CPU %u\n", cpu_policy->cpu);
  1116. goto fail;
  1117. }
  1118. }
  1119. if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
  1120. unsigned int cur_freq = 0;
  1121. if (cpufreq_driver->get)
  1122. cur_freq = cpufreq_driver->get(cpu_policy->cpu);
  1123. if (!cur_freq || !cpu_policy->cur) {
  1124. printk(KERN_ERR "cpufreq: resume failed to assert "
  1125. "current frequency is what timing core "
  1126. "thinks it is.\n");
  1127. goto out;
  1128. }
  1129. if (unlikely(cur_freq != cpu_policy->cur)) {
  1130. struct cpufreq_freqs freqs;
  1131. if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
  1132. dprintk("Warning: CPU frequency "
  1133. "is %u, cpufreq assumed %u kHz.\n",
  1134. cur_freq, cpu_policy->cur);
  1135. freqs.cpu = cpu;
  1136. freqs.old = cpu_policy->cur;
  1137. freqs.new = cur_freq;
  1138. srcu_notifier_call_chain(
  1139. &cpufreq_transition_notifier_list,
  1140. CPUFREQ_RESUMECHANGE, &freqs);
  1141. adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
  1142. cpu_policy->cur = cur_freq;
  1143. }
  1144. }
  1145. out:
  1146. schedule_work(&cpu_policy->update);
  1147. fail:
  1148. cpufreq_cpu_put(cpu_policy);
  1149. return ret;
  1150. }
  1151. static struct sysdev_driver cpufreq_sysdev_driver = {
  1152. .add = cpufreq_add_dev,
  1153. .remove = cpufreq_remove_dev,
  1154. .suspend = cpufreq_suspend,
  1155. .resume = cpufreq_resume,
  1156. };
  1157. /*********************************************************************
  1158. * NOTIFIER LISTS INTERFACE *
  1159. *********************************************************************/
  1160. /**
  1161. * cpufreq_register_notifier - register a driver with cpufreq
  1162. * @nb: notifier function to register
  1163. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
  1164. *
  1165. * Add a driver to one of two lists: either a list of drivers that
  1166. * are notified about clock rate changes (once before and once after
  1167. * the transition), or a list of drivers that are notified about
  1168. * changes in cpufreq policy.
  1169. *
  1170. * This function may sleep, and has the same return conditions as
  1171. * blocking_notifier_chain_register.
  1172. */
  1173. int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
  1174. {
  1175. int ret;
  1176. WARN_ON(!init_cpufreq_transition_notifier_list_called);
  1177. switch (list) {
  1178. case CPUFREQ_TRANSITION_NOTIFIER:
  1179. ret = srcu_notifier_chain_register(
  1180. &cpufreq_transition_notifier_list, nb);
  1181. break;
  1182. case CPUFREQ_POLICY_NOTIFIER:
  1183. ret = blocking_notifier_chain_register(
  1184. &cpufreq_policy_notifier_list, nb);
  1185. break;
  1186. default:
  1187. ret = -EINVAL;
  1188. }
  1189. return ret;
  1190. }
  1191. EXPORT_SYMBOL(cpufreq_register_notifier);
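/*
 * Minimal sketch of a transition notifier (illustrative only; the callback
 * and variable names are hypothetical):
 *
 *	static int my_freq_notifier(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu %u now runs at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_freq_nb = {
 *		.notifier_call = my_freq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */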
  1192. /**
  1193. * cpufreq_unregister_notifier - unregister a driver with cpufreq
  1194. * @nb: notifier block to be unregistered
  1195. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
  1196. *
  1197. * Remove a driver from the CPU frequency notifier list.
  1198. *
  1199. * This function may sleep, and has the same return conditions as
  1200. * blocking_notifier_chain_unregister.
  1201. */
  1202. int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
  1203. {
  1204. int ret;
  1205. switch (list) {
  1206. case CPUFREQ_TRANSITION_NOTIFIER:
  1207. ret = srcu_notifier_chain_unregister(
  1208. &cpufreq_transition_notifier_list, nb);
  1209. break;
  1210. case CPUFREQ_POLICY_NOTIFIER:
  1211. ret = blocking_notifier_chain_unregister(
  1212. &cpufreq_policy_notifier_list, nb);
  1213. break;
  1214. default:
  1215. ret = -EINVAL;
  1216. }
  1217. return ret;
  1218. }
  1219. EXPORT_SYMBOL(cpufreq_unregister_notifier);
  1220. /*********************************************************************
  1221. * GOVERNORS *
  1222. *********************************************************************/
  1223. int __cpufreq_driver_target(struct cpufreq_policy *policy,
  1224. unsigned int target_freq,
  1225. unsigned int relation)
  1226. {
  1227. int retval = -EINVAL;
  1228. dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
  1229. target_freq, relation);
  1230. if (cpu_online(policy->cpu) && cpufreq_driver->target)
  1231. retval = cpufreq_driver->target(policy, target_freq, relation);
  1232. return retval;
  1233. }
  1234. EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
  1235. int cpufreq_driver_target(struct cpufreq_policy *policy,
  1236. unsigned int target_freq,
  1237. unsigned int relation)
  1238. {
  1239. int ret = -EINVAL;
  1240. policy = cpufreq_cpu_get(policy->cpu);
  1241. if (!policy)
  1242. goto no_policy;
  1243. if (unlikely(lock_policy_rwsem_write(policy->cpu)))
  1244. goto fail;
  1245. ret = __cpufreq_driver_target(policy, target_freq, relation);
  1246. unlock_policy_rwsem_write(policy->cpu);
  1247. fail:
  1248. cpufreq_cpu_put(policy);
  1249. no_policy:
  1250. return ret;
  1251. }
  1252. EXPORT_SYMBOL_GPL(cpufreq_driver_target);
  1253. int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
  1254. {
  1255. int ret = 0;
  1256. policy = cpufreq_cpu_get(policy->cpu);
  1257. if (!policy)
  1258. return -EINVAL;
  1259. if (cpu_online(cpu) && cpufreq_driver->getavg)
  1260. ret = cpufreq_driver->getavg(policy, cpu);
  1261. cpufreq_cpu_put(policy);
  1262. return ret;
  1263. }
  1264. EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
  1265. /*
  1266. * when "event" is CPUFREQ_GOV_LIMITS
  1267. */
  1268. static int __cpufreq_governor(struct cpufreq_policy *policy,
  1269. unsigned int event)
  1270. {
  1271. int ret;
  1272. /* Must only be defined when the default governor is known to have latency
  1273. restrictions, e.g. conservative or ondemand.
  1274. Kconfig already ensures that this is the case.
  1275. */
  1276. #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
  1277. struct cpufreq_governor *gov = &cpufreq_gov_performance;
  1278. #else
  1279. struct cpufreq_governor *gov = NULL;
  1280. #endif
  1281. if (policy->governor->max_transition_latency &&
  1282. policy->cpuinfo.transition_latency >
  1283. policy->governor->max_transition_latency) {
  1284. if (!gov)
  1285. return -EINVAL;
  1286. else {
  1287. printk(KERN_WARNING "%s governor failed, too long"
  1288. " transition latency of HW, fallback"
  1289. " to %s governor\n",
  1290. policy->governor->name,
  1291. gov->name);
  1292. policy->governor = gov;
  1293. }
  1294. }
  1295. if (!try_module_get(policy->governor->owner))
  1296. return -EINVAL;
  1297. dprintk("__cpufreq_governor for CPU %u, event %u\n",
  1298. policy->cpu, event);
  1299. ret = policy->governor->governor(policy, event);
  1300. /* we keep one module reference alive for
  1301. each CPU governed by this governor */
  1302. if ((event != CPUFREQ_GOV_START) || ret)
  1303. module_put(policy->governor->owner);
  1304. if ((event == CPUFREQ_GOV_STOP) && !ret)
  1305. module_put(policy->governor->owner);
  1306. return ret;
  1307. }
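/*
 * Module reference counting performed above, in summary: a successful
 * CPUFREQ_GOV_START keeps the reference taken at the top of the function
 * (one per policy using this governor); every other outcome drops that
 * reference again, and a successful CPUFREQ_GOV_STOP additionally drops
 * the reference that the earlier START left behind.
 */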
  1308. int cpufreq_register_governor(struct cpufreq_governor *governor)
  1309. {
  1310. int err;
  1311. if (!governor)
  1312. return -EINVAL;
  1313. mutex_lock(&cpufreq_governor_mutex);
  1314. err = -EBUSY;
  1315. if (__find_governor(governor->name) == NULL) {
  1316. err = 0;
  1317. list_add(&governor->governor_list, &cpufreq_governor_list);
  1318. }
  1319. mutex_unlock(&cpufreq_governor_mutex);
  1320. return err;
  1321. }
  1322. EXPORT_SYMBOL_GPL(cpufreq_register_governor);
  1323. void cpufreq_unregister_governor(struct cpufreq_governor *governor)
  1324. {
  1325. if (!governor)
  1326. return;
  1327. mutex_lock(&cpufreq_governor_mutex);
  1328. list_del(&governor->governor_list);
  1329. mutex_unlock(&cpufreq_governor_mutex);
  1330. return;
  1331. }
  1332. EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
  1333. /*********************************************************************
  1334. * POLICY INTERFACE *
  1335. *********************************************************************/
  1336. /**
  1337. * cpufreq_get_policy - get the current cpufreq_policy
  1338. * @policy: struct cpufreq_policy into which the current cpufreq_policy
  1339. * is written
  1340. *
  1341. * Reads the current cpufreq policy.
  1342. */
  1343. int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
  1344. {
  1345. struct cpufreq_policy *cpu_policy;
  1346. if (!policy)
  1347. return -EINVAL;
  1348. cpu_policy = cpufreq_cpu_get(cpu);
  1349. if (!cpu_policy)
  1350. return -EINVAL;
  1351. memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
  1352. cpufreq_cpu_put(cpu_policy);
  1353. return 0;
  1354. }
  1355. EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different from the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
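
/*
 * Illustrative sketch (not part of the original file): a policy notifier
 * that takes part in the CPUFREQ_ADJUST pass issued by __cpufreq_set_policy()
 * above, clamping the maximum frequency.  The 800000 kHz cap and all
 * "demo_*" names are arbitrary example values.  Kept compile-disabled.
 */
#if 0
static int demo_policy_notifier_call(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, 800000);

	return 0;
}

static struct notifier_block demo_policy_notifier = {
	.notifier_call = demo_policy_notifier_call,
};

/* typically registered once, e.g. from a module init path */
static int demo_policy_notifier_register(void)
{
	return cpufreq_register_notifier(&demo_policy_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
#endif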
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different needs
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a
	   change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
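
/*
 * Illustrative sketch (not part of the original file): a platform driver
 * that learns, e.g. from a firmware/BIOS event, that the allowed frequency
 * range changed can ask the core to re-evaluate every affected CPU.  The
 * helper name "demo_limits_changed" is hypothetical.
 */
#if 0
static void demo_limits_changed(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
}
#endif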
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();
			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						&cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
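
/*
 * Illustrative sketch (not part of the original file): the skeleton of a
 * ->target() style driver registering with the core.  All "demo_*" names
 * and the frequency values are hypothetical; a real driver fills in
 * hardware-specific init/verify/target callbacks and emits the usual
 * CPUFREQ_PRECHANGE/POSTCHANGE notifications from ->target().
 */
#if 0
static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 200000;		/* kHz */
	policy->cpuinfo.max_freq = 800000;		/* kHz */
	policy->cpuinfo.transition_latency = 100000;	/* ns  */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = policy->max;
	return 0;
}

static int demo_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int demo_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int relation)
{
	/* program the hardware here and post the transition notifiers */
	return 0;
}

static struct cpufreq_driver demo_cpufreq_driver = {
	.name	= "demo",
	.init	= demo_cpufreq_init,
	.verify	= demo_cpufreq_verify,
	.target	= demo_cpufreq_target,
	.owner	= THIS_MODULE,
};

static int __init demo_cpufreq_register(void)
{
	return cpufreq_register_driver(&demo_cpufreq_driver);
}
#endif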
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
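
/*
 * Illustrative sketch (not part of the original file): the matching
 * teardown for the hypothetical "demo" driver registered above, as it
 * would typically appear in that driver's module exit path.
 */
#if 0
static void __exit demo_cpufreq_unregister(void)
{
	cpufreq_unregister_driver(&demo_cpufreq_driver);
}
#endif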
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
	return 0;
}
core_initcall(cpufreq_core_init);