processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
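
/*
 * Worked example of the conversions above: PM_TIMER_FREQUENCY is the
 * ACPI PM timer rate, 3.579545 MHz, so one tick lasts roughly 279 ns.
 * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 ~= 357 ticks, and
 * PM_TIMER_TICKS_TO_US(357) = (357 * 1000) / 3579 ~= 99 us. The small
 * rounding loss is why these macros are used for coarse idle
 * bookkeeping only, never for timekeeping.
 */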

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
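
/*
 * Example of the bm_history default for a HZ value not in the table
 * above: with HZ=250, HZ/25 = 10, so the mask is (1U << 10) - 1 =
 * 0x3FF -- ten 4 ms jiffies, i.e. the same 40 ms window as the
 * HZ=100 and HZ=800 rows.
 */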

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
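
/*
 * Note on the wraparound handling above: unless the FADT advertises a
 * 32-bit PM timer, the counter is only 24 bits wide and rolls over
 * about every 4.7 seconds (2^24 / 3579545 Hz); the 32-bit variant
 * rolls over roughly every 20 minutes. The t2 < t1 branches assume at
 * most one rollover between the two reads, which holds for the short
 * idle residencies measured here.
 */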

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
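
/*
 * The barrier above closes a lost-wakeup race: the scheduler checks
 * TS_POLLING to decide whether setting NEED_RESCHED is enough to stop
 * an idling CPU or whether it must send a reschedule IPI. Clearing
 * TS_POLLING before testing need_resched() guarantees that either this
 * CPU sees the resched request and skips the halt, or the waker sees
 * TS_POLLING cleared and sends the IPI that terminates the halt.
 */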

#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload (old may be NULL on first use) */
		if ((!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
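
/*
 * Background for the broadcast hooks above: on CPUs whose local APIC
 * timer stops in deep C-states, per-CPU timer duty is handed to a
 * global clockevent device (e.g. HPET or PIT) for the duration of the
 * idle period. CLOCK_EVT_NOTIFY_BROADCAST_ENTER parks this CPU's local
 * timer and arms the broadcast device; ..._EXIT switches back on
 * wakeup.
 */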

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device *device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device *device)
{
	acpi_idle_suspend = 0;
	return 0;
}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;

		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
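
/*
 * When tsc_halts_in_c() reports true for a state about to be used, the
 * callers below invoke mark_tsc_unstable(), which demotes the TSC as a
 * clocksource so that timekeeping migrates to a counter (HPET, PM
 * timer) that keeps running across deep idle.
 */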

#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
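
	/*
	 * Example of the bm_activity bookkeeping above: it is a sliding
	 * 32-bit history with one bit per jiffy, bit 0 meaning "bus-master
	 * activity this jiffy". DMA seen 3 jiffies ago and nothing since
	 * leaves the 0x1 shifted up to 0x8; the promotion check further
	 * down masks this history with bm_history, so promotion into C3
	 * stays blocked until the activity bits age out of the mask.
	 */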

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}

	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
	    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
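
/*
 * Concrete promotion scenario for the state machine above: a CPU in C2
 * (promotion.threshold.count == 4 for C2 and deeper, set in
 * acpi_processor_set_power_policy() below) needs four consecutive
 * sleeps longer than C2's latency_ticks, with C3's latency inside the
 * current PM QoS bound and no bus-master activity within the
 * bm_history window, before pr->power.state switches to C3. A single
 * short sleep zeroes promotion.count again via the demotion branch.
 */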

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
#endif /* !CONFIG_CPU_IDLE */

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
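
/*
 * P_BLK layout, for reference: the processor block is 6 bytes of I/O
 * space -- P_CNT at offset 0 (4 bytes, throttling control), P_LVL2 at
 * offset 4 and P_LVL3 at offset 5, which is why the C2/C3 entry
 * addresses above are pblk + 4 and pblk + 5. Reading P_LVL2 or P_LVL3
 * is what actually puts the CPU into the corresponding C-state.
 */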

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}
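
	/*
	 * _CST format, for reference: Package { Count, Package { Register
	 * (buffer holding a generic address structure), Type (integer,
	 * 1 = C1 .. 3 = C3), Latency (integer, worst-case us), Power
	 * (integer, average mW) }, ... } -- which is exactly what the
	 * 4-element checks in the loop below validate.
	 */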

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
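
/*
 * Illustrative /proc/acpi/processor/CPUx/power output produced by the
 * show routine above (the values are an invented example, not from a
 * real machine):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000000
 *   maximum allowed latency: 2000000000 usec
 *   states:
 *       C1: type[C1] promotion[C2] demotion[--] latency[001] ...
 *      *C2: type[C2] promotion[C3] demotion[C1] latency[020] ...
 */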

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * Fall back to the default idle loop, when pm_idle_save had
	 * been initialized.
	 */
	if (pm_idle_save) {
		pm_idle = pm_idle_save;
		/* Relies on interrupts forcing exit from idle. */
		synchronize_sched();
	}

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
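
/*
 * The notifier above hangs off the PM QoS cpu_dma_latency chain: when
 * a driver tightens the allowed latency, the IPI kicks every CPU out
 * of its current C-state, and the pm_qos_requirement() checks in the
 * idle path enforce the new bound on the next idle entry.
 */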

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
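
/*
 * BM_RLD ("bus master reload") makes a bus-master request break the
 * CPU out of C3, as the ACPI spec requires; it is irrelevant for
 * C1/C2. Caching the current value in pr->flags.bm_rld_set means the
 * register is only rewritten on C3 <-> non-C3 transitions rather than
 * on every idle entry.
 */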

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
  1330. static int c3_cpu_count;
  1331. static DEFINE_SPINLOCK(c3_lock);
  1332. /**
  1333. * acpi_idle_enter_bm - enters C3 with proper BM handling
  1334. * @dev: the target CPU
  1335. * @state: the state data
  1336. *
  1337. * If BM is detected, the deepest non-C3 idle state is entered instead.
  1338. */
  1339. static int acpi_idle_enter_bm(struct cpuidle_device *dev,
  1340. struct cpuidle_state *state)
  1341. {
  1342. struct acpi_processor *pr;
  1343. struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
  1344. u32 t1, t2;
  1345. int sleep_ticks = 0;
  1346. pr = __get_cpu_var(processors);
  1347. if (unlikely(!pr))
  1348. return 0;
  1349. if (acpi_idle_suspend)
  1350. return(acpi_idle_enter_c1(dev, state));
  1351. if (acpi_idle_bm_check()) {
  1352. if (dev->safe_state) {
  1353. dev->last_state = dev->safe_state;
  1354. return dev->safe_state->enter(dev, dev->safe_state);
  1355. } else {
  1356. local_irq_disable();
  1357. acpi_safe_halt();
  1358. local_irq_enable();
  1359. return 0;
  1360. }
  1361. }
  1362. local_irq_disable();
  1363. current_thread_info()->status &= ~TS_POLLING;
  1364. /*
  1365. * TS_POLLING-cleared state must be visible before we test
  1366. * NEED_RESCHED:
  1367. */
  1368. smp_mb();
  1369. if (unlikely(need_resched())) {
  1370. current_thread_info()->status |= TS_POLLING;
  1371. local_irq_enable();
  1372. return 0;
  1373. }
	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}
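
	/* Bracket the C-state entry with PM-timer reads to measure residency. */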
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
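
/*
 * Only the driver name and owner are declared here; the C-states
 * themselves are filled in per-device by acpi_processor_setup_cpuidle().
 */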
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
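		/*
		 * Skip C2/C3 on SMP unless the firmware declares them
		 * MP-safe, either via _CST or the FADT C2_MP flag.
		 */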
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;
		state->flags = 0;
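
		/*
		 * C1 and C2 need no bus-master check, so remember the
		 * deepest of them as the fallback for acpi_idle_enter_bm().
		 */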
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
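
/**
 * acpi_processor_cst_has_changed - rebuild C-state data after a _CST change
 * @pr: the ACPI processor
 *
 * Re-reads the processor power info and re-registers the cpuidle states,
 * with the cpuidle framework paused while the device is reconfigured.
 */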
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
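
/**
 * acpi_processor_power_init - set up idle handling for one processor
 * @pr: the ACPI processor
 * @device: the corresponding ACPI device
 *
 * Applies one-time boot-option and DMI limits on max_cstate, notifies the
 * BIOS of _CST support, reads the power info, registers the cpuidle device
 * and creates the /proc power entry.
 */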
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the "idle=halt" boot option is used, halt is
			 * used for CPU idle, so C2/C3 are meaningless and
			 * max_cstate is limited to 1.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				    &acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;
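
	/*
	 * Write the FADT CST_CNT value to the SMI command port to tell the
	 * BIOS that the OS can handle _CST, so full C-state data is exposed.
	 */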
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		if (pm_idle_save)
			pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle). Wait for all processors to update their
		 * cached/local copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				       &acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}