processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
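/*
 * Note: the ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz), so
 * one microsecond corresponds to roughly 3.58 timer ticks; the conversion
 * macros above round accordingly.
 */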
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
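/*
 * Note: the PM timer is either 24 or 32 bits wide; the ACPI_FADT_32BIT_TIMER
 * flag (TMR_VAL_EXT in the FADT) says which, so the elapsed-tick arithmetic
 * below must handle wraparound at the correct width.
 */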
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}
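/*
 * Count of CPUs concurrently attempting C3; used below to decide when
 * bus master arbitration (ARB_DIS) can be disabled system-wide.
 */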
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;
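/*
 * While acpi_idle_suspend is set, the idle entry paths must avoid
 * touching ACPI I/O ports and fall back to a plain safe HALT instead
 * (see acpi_idle_enter_c1()).
 */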
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
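/*
 * acpi_processor_idle() below is the legacy in-driver idle loop used when
 * CONFIG_CPU_IDLE is not set; it implements its own promotion/demotion
 * policy between C-states rather than deferring to a cpuidle governor.
 */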
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of long sleeps (time asleep greater than the
	 * threshold) and promote when the count threshold is reached.  Note
	 * that bus mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of short sleeps (time asleep less than the time
	 * threshold) and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
					(idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * Fall back to the default idle loop, when pm_idle_save has
	 * been initialized.
	 */
	if (pm_idle_save) {
		pm_idle = pm_idle_save;
		/* Relies on interrupts forcing exit from idle. */
		synchronize_sched();
	}

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
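/*
 * c3_cpu_count is protected by c3_lock; bus master arbitration is
 * disabled only once every online CPU has entered C3, and re-enabled
 * on the first exit.
 */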
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;
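		/*
		 * On SMP systems without _CST and without the FADT
		 * C2-in-MP flag, only C1 is known to be safe when more
		 * than one CPU is online, so deeper states are skipped.
		 */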
#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
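		/*
		 * Heuristic: only pick this state when the expected idle
		 * period is at least latency_factor times the exit
		 * latency, so the wakeup cost is amortized.
		 */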
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
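			/*
			 * Residency timing is only treated as valid for C1
			 * when it is entered through the FFH (MWAIT) path;
			 * timing around a plain HLT is not considered
			 * reliable.
			 */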
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
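			/*
			 * Use the bus-master-aware entry path only when
			 * the platform requires BM status checks; plain
			 * C3 entry is sufficient otherwise.
			 */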
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;
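	/*
	 * _CST has changed: quiesce cpuidle, disable this device,
	 * re-evaluate the C-state tables, and then re-register the
	 * updated states before resuming idle entry.
	 */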
	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the "idle=halt" boot option is used, halt
			 * is forced for CPU idle. C2/C3 are then
			 * meaningless, so max_cstate is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				    &acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;
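	/*
	 * If the FADT supplies a _CST enable value, write it to the SMI
	 * command port to tell the firmware that the OS supports _CST
	 * (and thus C-state change notifications).
	 */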
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported. Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		if (pm_idle_save)
			pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm
		 * callback (pm_idle). Wait for all processors to update
		 * their cached/local copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				       &acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}