/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);
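
/*
 * "idle=poll" and "idle=halt" on the kernel command line take idle
 * handling away from this driver entirely; honor that here.
 */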
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
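
/*
 * Save BM_RLD across system suspend and restore it on resume if the
 * firmware changed it behind our back; C3 wakeup on older chipsets
 * relies on BM_RLD staying set (see acpi_processor_power_verify_c3()).
 */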
static int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_processor_resume(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */
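
/*
 * On x86 the TSC may stop in C-states deeper than C1 unless the CPU
 * advertises a nonstop TSC; mark the TSC unstable in that case.
 */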
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
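
/*
 * Derive C2/C3 entry addresses from the processor's P_BLK and their
 * latencies from the FADT; used when no _CST object is available.
 */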
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
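
/*
 * Fall back to a plain HALT-based C1 if nothing else provided one;
 * every processor must support C1, so this always succeeds.
 */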
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
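
/*
 * Parse the _CST package: element 0 holds the C-state count and each
 * following element is a 4-entry package (register, type, latency,
 * power). Entries that fail validation are silently skipped.
 */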
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	u64 count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);

		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}
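
/*
 * Decide whether C3 may be used on this platform: the PIIX4 Type-F DMA
 * erratum, bus-master (BM) check/control capabilities, and FADT WBINVD
 * support all factor into the decision.
 */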
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}
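
/*
 * Walk the discovered C-states, mark the usable ones valid, and record
 * any state deep enough to require LAPIC timer broadcast or to stop
 * the TSC. Returns the number of working states.
 */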
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);

		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}
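
/*
 * Gather C-state data for one processor, preferring _CST and falling
 * back to the FADT/P_BLK method, then verify the result.
 */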
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
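
/*
 * Re-evaluate a CPU's C-states after a hotplug event: disable its
 * cpuidle device, refresh the power info, and re-enable the device.
 */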
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
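
/*
 * Handle a _CST change notification from the platform: refresh the
 * driver-wide state table once, then rebuild every CPU's device.
 */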
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}

static int acpi_processor_registered;
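
/*
 * One-time driver registration plus per-CPU cpuidle device setup; the
 * cpuidle driver itself is registered on the first CPU that reports
 * usable C-states.
 */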
int acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status = 0;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
					acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}
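
/*
 * Tear down this CPU's cpuidle device; the driver itself is
 * unregistered along with the last remaining device.
 */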
int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}