/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static struct cpufreq_governor *cpufreq_cpu_governor[NR_CPUS];
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in the cpufreq hotplug path should
 *   not take this sem, as the top level hotplug notifier handler takes it.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
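
/*
 * lock_policy_rwsem_read() and lock_policy_rwsem_write() are generated
 * by the macro below.  Both return 0 with the semaphore held, or -1 if
 * the CPU went offline while the semaphore was being taken (in which
 * case the semaphore has already been released again).
 */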
#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}
lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
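
/**
 * cpufreq_cpu_get - acquire a reference to a CPU's policy
 * @cpu: CPU number
 *
 * Takes a reference on the cpufreq driver module and on the policy's
 * kobject; must be balanced by a call to cpufreq_cpu_put().  Returns
 * NULL if the CPU number is invalid, no driver is registered, or no
 * policy exists for this CPU.
 */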
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= NR_CPUS)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = cpufreq_cpu_data[cpu];

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
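
/**
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get()
 * @data: policy returned by a matching cpufreq_cpu_get() call
 */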
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}
void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		/* never pass a caller-built buffer as the format string */
		printk("%s", s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
			" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = cpufreq_cpu_data[freqs->cpu];
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module(name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
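
/*
 * Each show_one() instantiation above expands to a tiny accessor;
 * show_scaling_min_freq(), for example, just prints policy->min.
 */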
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;	/* signed: holds negative error codes */	\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;	/* signed: holds negative error codes */
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
					- (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
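
/*
 * show() and store() dispatch sysfs reads and writes to the
 * per-attribute handlers above, taking a policy reference and the
 * per-CPU policy rwsem around each access.
 */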
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (lock_policy_rwsem_read(policy->cpu) < 0) {
		/* drop the reference taken above */
		cpufreq_cpu_put(policy);
		return -EINVAL;
	}

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
	cpufreq_cpu_put(policy);
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (lock_policy_rwsem_write(policy->cpu) < 0) {
		/* drop the reference taken above */
		cpufreq_cpu_put(policy);
		return -EINVAL;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
	cpufreq_cpu_put(policy);
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}
	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	lock_policy_rwsem_write(cpu);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		unlock_policy_rwsem_write(cpu);
		goto err_out;
	}
	policy->user_policy.min = policy->cpuinfo.min_freq;
	policy->user_policy.max = policy->cpuinfo.max_freq;
#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	if (cpufreq_cpu_governor[cpu]) {
		policy->governor = cpufreq_cpu_governor[cpu];
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu_mask(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs. They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0)
				goto err_out_driver_exit;

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			managed_policy->cpus = policy->cpus;
			cpufreq_cpu_data[cpu] = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret) {
				unlock_policy_rwsem_write(cpu);
				goto err_out_driver_exit;
			}

			cpufreq_debug_enable_ratelimit();
			ret = 0;
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret) {
		unlock_policy_rwsem_write(cpu);
		goto err_out_driver_exit;
	}
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_driver_exit;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus) {
		cpufreq_cpu_data[j] = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu_mask(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_unregister;
		}
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	unlock_policy_rwsem_write(cpu);

	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;

err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;

#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpu_clear(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}
#endif

	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EFAULT;
	}

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	cpufreq_cpu_governor[cpu] = data->governor;
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			cpufreq_cpu_governor[j] = data->governor;
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	unlock_policy_rwsem_write(cpu);

	kobject_unregister(&data->kobj);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();
	return 0;
}
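
/*
 * cpufreq_remove_dev - sysdev callback for CPU removal; takes the
 * policy rwsem in write mode and hands off to __cpufreq_remove_dev(),
 * which releases it.
 */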
static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later
 * by either calling cpufreq_update_policy() or scheduling handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		if (unlikely(lock_policy_rwsem_read(cpu))) {
			/* drop the reference taken above */
			cpufreq_cpu_put(policy);
			return ret_freq;
		}
		ret_freq = policy->cur;
		unlock_policy_rwsem_read(cpu);
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
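
/*
 * __cpufreq_get - ask the driver for the current frequency of @cpu
 * and, if it disagrees with the cached policy->cur, report the
 * discrepancy and schedule a policy update.  Callers hold the policy
 * rwsem.
 */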
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency, as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
			       "cpufreq assumed %u kHz.\n",
			       cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return 0;
}
/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
				       "is %u, cpufreq assumed %u kHz.\n",
				       cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
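
/*
 * Example (a sketch, not part of this file): a typical transition
 * client registers a callback and reads the cpufreq_freqs payload on
 * CPUFREQ_POSTCHANGE.  my_transition() and my_nb are hypothetical
 * names, not symbols defined anywhere in the tree:
 *
 *	static int my_transition(struct notifier_block *nb,
 *				 unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			printk(KERN_DEBUG "cpu%u now at %u kHz\n",
 *			       freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */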
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
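
/*
 * __cpufreq_driver_target() expects the caller to already hold the
 * policy rwsem in write mode; cpufreq_driver_target() below is the
 * locked wrapper that takes a policy reference and the semaphore
 * itself before delegating here.
 */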
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (unlikely(lock_policy_rwsem_write(policy->cpu))) {
		/* drop the reference taken above */
		cpufreq_cpu_put(policy);
		return -EINVAL;
	}

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

	cpufreq_cpu_put(policy);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * __cpufreq_governor - forward a governor event (e.g. CPUFREQ_GOV_START,
 * CPUFREQ_GOV_STOP or CPUFREQ_GOV_LIMITS) to the policy's governor,
 * managing the governor module refcount in the process.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * __cpufreq_set_policy - apply a new policy
 * data   : current policy.
 * policy : policy to be set.
 *
 * Caller must hold the policy rwsem in write mode.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* a policy whose minimum exceeds its maximum can never be valid */
	if (policy->min > policy->max) {
		ret = -EINVAL;
		goto error_out;
	}
	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret = 0;

	if (!data)
		return -ENODEV;

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		/* drop the reference taken above */
		cpufreq_cpu_put(data);
		return -EINVAL;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

	cpufreq_cpu_put(data);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
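
/*
 * cpufreq_cpu_callback - react to CPU hotplug events by adding or
 * removing the corresponding cpufreq interface.  The DOWN_PREPARE
 * cases take the policy rwsem themselves because
 * __cpufreq_remove_dev() expects it held (and releases it).
 */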
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class, &cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < NR_CPUS; i++)
			if (cpufreq_cpu_data[i])
				ret = 0;

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						&cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
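
/*
 * Example (a sketch, not part of this file): a minimal driver supplies
 * .init, .verify and either .setpolicy or .target, then registers:
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "mydrv",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify_policy,
 *		.target	= my_set_target,
 *	};
 *
 *	cpufreq_register_driver(&my_driver);
 *
 * my_cpu_init(), my_verify_policy() and my_set_target() are
 * hypothetical driver callbacks, not functions defined anywhere in
 * the tree.
 */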
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
	return 0;
}

core_initcall(cpufreq_core_init);