processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
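
/*
 * Worked example (added for illustration, not in the original source):
 * with HZ=250 the default mask above evaluates to
 * (1U << (250 / 25)) - 1 = 0x3FF, i.e. 10 jiffies of history, and
 * 10 jiffies at 250 HZ again cover 40ms, matching the table above.
 */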

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
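
/*
 * Worked example (added for illustration, not in the original source):
 * on a 24-bit PM timer, a reading that wraps from t1 = 0x00FFFFF0 to
 * t2 = 0x00000010 gives ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF = 0x1F
 * ticks, roughly 8us at the 3.579545 MHz PM timer frequency.
 */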

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}
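
/*
 * Clarifying note (added, not in the original source): cx points into
 * the pr->power.states[] array, so the pointer subtraction above yields
 * the C-state index directly, e.g.
 * &pr->power.states[2] - pr->power.states == 2.
 */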

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		if (irqs_disabled())
			local_irq_enable();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		if (irqs_disabled())
			local_irq_enable();

		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!
					    (pr->power.bm_activity & cx->
					     promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
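
/*
 * Illustrative walk-through (added, not in the original source): on a
 * processor exposing valid C1, C2 and C3, the two loops above chain the
 * states as C1 <-> C2 <-> C3, so C1 promotes to C2 after 10
 * sufficiently long sleeps, C2 promotes to C3 after 4, and each state
 * demotes to its shallower neighbour after a single too-short sleep.
 */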

#endif /* !CONFIG_CPU_IDLE */

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
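
/*
 * For reference (comment added, not in the original source), the _CST
 * object parsed above is expected to have the shape:
 *
 *   Package {
 *       count,                                       // Integer
 *       Package { register, type, latency, power },  // one per C-state
 *       ...
 *   }
 *
 * where register is a buffer describing the entry method, type is 1..3
 * for C1..C3, latency is in microseconds and power (per the ACPI spec)
 * in milliwatts; entries that do not match this layout are skipped.
 */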

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * The WBINVD flag must be set in the FADT for C3 to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
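
/*
 * Example /proc/acpi/processor/CPU0/power output (added for
 * illustration with invented values, not in the original source):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000000
 *   maximum allowed latency: 2000000000 usec
 *   states:
 *       C1: type[C1] promotion[C2] demotion[--] latency[000] ...
 *      *C2: type[C2] promotion[--] demotion[C1] latency[001] ...
 */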

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
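
/*
 * Design note (added, not in the original source): c3_cpu_count tracks,
 * under c3_lock, how many CPUs are currently inside this C3 entry path.
 * Bus master arbitration is disabled only once the last online CPU gets
 * here, and the first CPU to wake re-enables it, since keeping ARB_DIS
 * set while any CPU is still running would stall DMA.
 */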

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
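
/*
 * Worked example (added for illustration, not in the original source):
 * with the default latency_factor of 2, a C3 state whose _CST latency
 * is 100us is registered with exit_latency = 100 and
 * target_residency = 200, so the cpuidle governor should only pick it
 * when the expected idle period is at least twice the transition cost.
 */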

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle). Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}