/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *	- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
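
/*
 * Module parameters: max_cstate caps the deepest C-state the driver will
 * use, nocst ignores _CST data and falls back to the FADT,
 * bm_check_disable skips the bus-master activity check before entering
 * C3, and latency_factor scales a state's exit latency into its target
 * residency.
 */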
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
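
/*
 * Preserve BM_RLD across system sleep: save it before suspend and write
 * it back on resume if firmware changed it, so the C3 bus-master wakeup
 * setup done in acpi_processor_power_verify_c3() stays in effect.
 */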
int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

void acpi_processor_resume(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_X86)
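/*
 * Flag the TSC as unstable when it may stop in the C-state being
 * validated; CPUs advertising X86_FEATURE_NONSTOP_TSC keep it running
 * in all C-states and need no workaround.
 */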
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
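
/*
 * acpi_processor_get_power_info_fadt - read C2/C3 data from P_BLK/FADT
 * @pr: the ACPI processor
 *
 * Derives the C2/C3 entry addresses from the processor's P_BLK and the
 * worst-case latencies from the FADT, invalidating any state whose
 * latency exceeds the ACPI-defined maximum.
 */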
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
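
/*
 * acpi_processor_get_power_info_default - ensure a usable C1 fallback
 * @pr: the ACPI processor
 *
 * Every processor must support C1, so if neither _CST nor the FADT
 * provided a valid C1 entry, set one up using the HALT entry method.
 */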
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
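
/*
 * acpi_processor_get_power_info_cst - parse the _CST object
 * @pr: the ACPI processor
 *
 * Evaluates _CST, validates each package element (register buffer,
 * state type, latency), determines the entry method (FFH, HALT or
 * system I/O) and fills pr->power.states[]. Returns -ENODEV if _CST
 * is absent or disabled via the nocst parameter.
 */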
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	u64 count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);

		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}
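
/*
 * acpi_processor_power_verify_c3 - check platform prerequisites for C3
 * @pr: the ACPI processor
 * @cx: the candidate C3 state
 *
 * C3 is rejected on PIIX4 parts with Type-F DMA and on configurations
 * that can neither control bus-master arbitration nor flush caches via
 * WBINVD; otherwise the state is marked valid and BM_RLD is enabled.
 */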
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}
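
/*
 * acpi_processor_power_verify - validate the discovered C-states
 * @pr: the ACPI processor
 *
 * Marks each usable state valid, records where LAPIC timer broadcast
 * and TSC workarounds become necessary, and returns the number of
 * working states.
 */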
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}
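
/*
 * acpi_processor_get_power_info - (re)build the processor's C-state table
 * @pr: the ACPI processor
 *
 * Prefers _CST data, falls back to the FADT, guarantees a C1 default,
 * then verifies the result. pr->flags.power is set only when a valid
 * C2 or C3 state is available.
 */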
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
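
/*
 * acpi_idle_driver is registered once; its states[] array is filled in
 * by acpi_processor_setup_cpuidle_states() before registration.
 */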
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
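
/*
 * acpi_processor_hotplug - refresh one CPU's C-state data
 * @pr: the ACPI processor
 *
 * Re-reads the power info for this CPU and rebuilds its per-cpu state
 * table with the cpuidle device disabled under cpuidle_pause_and_lock(),
 * so the idle loop never sees stale state.
 */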
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
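
/*
 * acpi_processor_cst_has_changed - handle a C-state change notification
 * @pr: the ACPI processor that received the notification
 *
 * Disables every online CPU's cpuidle device, re-reads the power info
 * and rebuilds both the global idle states and the per-cpu tables, then
 * re-enables the devices.
 */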
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}

static int acpi_processor_registered;
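
/*
 * acpi_processor_power_init - set up C-state support for one processor
 * @pr: the ACPI processor
 *
 * Applies DMI quirks on the first call, optionally tells the BIOS via
 * the SMI command port that the OS supports _CST, gathers the power
 * info, and registers the cpuidle driver (once) plus this CPU's
 * cpuidle device.
 */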
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status = 0;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported. Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}
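
/*
 * acpi_processor_power_exit - tear down this processor's C-state support
 * @pr: the ACPI processor
 *
 * Unregisters the per-cpu cpuidle device and, when the last one goes
 * away, the acpi_idle driver itself.
 */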
int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}