processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

#define ACPI_PROCESSOR_FILE_POWER       "power"

#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */

static void (*pm_idle_save) (void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);
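
/*
 * With the driver built in, both knobs are also reachable as boot
 * parameters under the "processor" prefix, e.g. (illustrative):
 *
 *      processor.max_cstate=1     limit the deepest C-state to C1
 *      processor.nocst=1          ignore _CST; use only FADT/P_BLK data
 */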

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
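
/*
 * Tuning example (an assumption about typical use, not from the original
 * sources): booting with "processor.bm_history=0x3" keeps only the last
 * two jiffies of bus-master history, so C3 is re-entered much sooner
 * after DMA traffic stops, at the risk of demoting too eagerly.
 */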

/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

static struct dmi_system_id __initdata processor_power_dmi_table[] = {
        {set_max_cstate, "IBM ThinkPad R40e", {
                DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
                DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")},
         (void *)1},
        {set_max_cstate, "Medion 41700", {
                DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
                DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")},
         (void *)1},
        {set_max_cstate, "Clevo 5600D", {
                DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
                DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        {},
};
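
/*
 * The ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz) and is
 * implemented as either a 24-bit or a 32-bit counter; the FADT's
 * TMR_VAL_EXT flag (tmr_val_ext) is set when the full 32 bits exist.
 * ticks_elapsed() compensates for at most one counter wraparound
 * between the two reads, using the width indicated by that flag.
 */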
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!acpi_fadt.tmr_val_ext)
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
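
/*
 * Housekeeping for a state change: reset the promotion/demotion counters
 * and keep the chipset's BM_RLD bit (bus-master requests wake the CPU
 * out of C3) in sync with whether the new state is C3.
 */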
static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload */
                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
                                          ACPI_MTX_DO_NOT_LOCK);
                break;
        }

        pr->power.state = new;

        return;
}

static atomic_t c3_cpu_count;
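
/*
 * The driver's idle loop, installed as pm_idle on CPU0 when usable
 * C-states are found.  Each invocation enters the currently selected Cx
 * state, measures the residency with the PM timer, and then applies the
 * promotion/demotion policy to pick the state for the next idle period.
 */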
static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        pr = processors[raw_smp_processor_id()];
        if (!pr)
                return;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx)
                goto easy_out;

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 32)
                        diff = 32;

                while (diff) {
                        /* if we didn't get called, assume there was busmaster activity */
                        diff--;
                        if (diff)
                                pr->power.bm_activity |= 0x1;
                        pr->power.bm_activity <<= 1;
                }

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                  &bm_status, ACPI_MTX_DO_NOT_LOCK);
                if (bm_status) {
                        pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                          1, ACPI_MTX_DO_NOT_LOCK);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity; forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity++;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fall back to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

        cx->usage++;

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        safe_halt();
                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL2 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                break;

        case ACPI_STATE_C3:

                if (pr->flags.bm_check) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        }
                } else {
                        /* SMP with no shared cache... Invalidate cache */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL3 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                if (pr->flags.bm_check) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
                                          ACPI_MTX_DO_NOT_LOCK);
                }

                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                break;

        default:
                local_irq_enable();
                return;
        }

        next_state = pr->power.state;

        /*
         * Promotion?
         * ----------
         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity &
                                              cx->promotion.threshold.bm)) {
                                                next_state =
                                                    cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

      end:
        /*
         * Demote if current state exceeds max_cstate
         */
        if ((pr->power.state - pr->power.states) > max_cstate) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);

        return;

      easy_out:
        /* do C1 instead of busy loop */
        if (pm_idle_save)
                pm_idle_save();
        else
                safe_halt();
        return;
}
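
/*
 * The default policy below builds a promotion/demotion chain over the
 * valid states: each state demotes to the next shallower valid state
 * after a single too-short sleep, and promotes to the next deeper valid
 * state after several sufficiently long sleeps (ten from C1, four from
 * C2 and deeper).  Promotion into C3 additionally requires a quiet
 * bus-master history (bm_history).
 */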
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return_VALUE(-ENODEV);

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return_VALUE(0);
}
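
/*
 * Fallback discovery via the FADT: the 6-byte P_BLK declared for this
 * processor holds P_CNT (4 bytes) followed by the one-byte P_LVL2 and
 * P_LVL3 entry registers, hence the pblk + 4 / pblk + 5 addresses below.
 */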
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

        if (!pr)
                return_VALUE(-EINVAL);

        if (!pr->pblk)
                return_VALUE(-ENODEV);

        /* clear each state entry (memset'ing the base repeatedly would
         * only ever clear states[0]) */
        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0,
                       sizeof(struct acpi_processor_cx));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return_VALUE(0);
}

static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
{
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");

        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0,
                       sizeof(struct acpi_processor_cx));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        return_VALUE(0);
}
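
/*
 * _CST returns a package of the form { count, CST1, CST2, ... } where
 * each CSTn is itself a 4-element package: a Register() buffer describing
 * the entry method, the C-state type (1..3), the worst-case latency in
 * microseconds, and the average power consumption in milliwatts.
 * Malformed entries are skipped below rather than treated as fatal.
 */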
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

        if (nocst)
                return_VALUE(-ENODEV);

        pr->power.count = 0;
        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0,
                       sizeof(struct acpi_processor_cx));

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return_VALUE(-ENODEV);
        }

        cst = (union acpi_object *)buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "not enough elements in _CST\n"));
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "count given by _CST is not valid\n"));
                status = -EFAULT;
                goto end;
        }

        /* We support up to ACPI_PROCESSOR_MAX_POWER. */
        if (count > ACPI_PROCESSOR_MAX_POWER) {
                printk(KERN_WARNING
                       "Limiting number of power states to max (%d)\n",
                       ACPI_PROCESSOR_MAX_POWER);
                printk(KERN_WARNING
                       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                count = ACPI_PROCESSOR_MAX_POWER;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = (union acpi_object *)&(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = (union acpi_object *)&(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
                    0 : reg->address;

                /* There should be an easy way to extract an integer... */
                obj = (union acpi_object *)&(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;

                if ((cx.type != ACPI_STATE_C1) &&
                    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
                        continue;

                if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3))
                        continue;

                obj = (union acpi_object *)&(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = (union acpi_object *)&(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                (pr->power.count)++;
                memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          pr->power.count));

        /* Validate number of power states discovered */
        if (pr->power.count < 2)
                status = -ENODEV;

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(status);
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
        ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c2");

        if (!cx->address)
                return_VOID;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return_VOID;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return_VOID;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;

        ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c3");

        if (!cx->address)
                return_VOID;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return_VOID;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return_VOID;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                /* bus mastering control is necessary */
                if (!pr->flags.bm_control) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "C3 support requires bus mastering control\n"));
                        return_VOID;
                }
        } else {
                /*
                 * WBINVD should be set in the FADT for the C3 state to be
                 * supported when bm_check is not required.
                 */
                if (acpi_fadt.wb_invd != 1) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return_VOID;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
                                  0, ACPI_MTX_DO_NOT_LOCK);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return_VOID;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

        /* NOTE: the idle thread may not be running while calling
         * this function */

        result = acpi_processor_get_power_info_cst(pr);
        if ((result) || (acpi_processor_power_verify(pr) < 2)) {
                result = acpi_processor_get_power_info_fadt(pr);
                if ((result) || (acpi_processor_power_verify(pr) < 2))
                        result = acpi_processor_get_power_info_default_c1(pr);
        }

        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return_VALUE(result);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        pr->flags.power = 1;
                }
        }

        return_VALUE(0);
}
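
/*
 * Called from the processor driver's Notify handler when the platform
 * signals a change in the available C-states (Notify value 0x81 on the
 * processor object per the ACPI spec), e.g. after an AC/battery
 * transition.  The idle handler is swapped out while the state table
 * is rebuilt, then reinstalled if power management is still usable.
 */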
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

        if (!pr)
                return_VALUE(-EINVAL);

        if (nocst)
                return_VALUE(-ENODEV);

        if (!pr->flags.power_setup_done)
                return_VALUE(-ENODEV);

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return_VALUE(result);
}
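
/*
 * The seq_file below backs the read-only "power" file created under each
 * processor's ACPI proc directory (typically
 * /proc/acpi/processor/CPUn/power).  A per-state line might look like
 * this (illustrative output, not captured from a real system):
 *
 *    *C2:  type[C2] promotion[C3] demotion[C1] latency[001] usage[00002032]
 */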
/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

        if (!pr)
                goto end;

        seq_printf(seq, "active state:            C%zd\n"
                   "max_cstate:              C%d\n"
                   "bus master activity:     %08x\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity);

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, "   %cC%d:                  ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage);
        }

      end:
        return_VALUE(0);
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

int acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run = 0;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_init");

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
        }

        if (!pr)
                return_VALUE(-EINVAL);

        if (acpi_fadt.cst_cnt && !nocst) {
                status =
                    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                          "Notifying BIOS of _CST ability failed\n"));
                }
        }

        acpi_processor_power_init_pdc(&(pr->power), pr->id);
        acpi_processor_set_pdc(pr, pr->power.pdc);
        acpi_processor_get_power_info(pr);

        /*
         * Install the idle handler if processor power management is
         * supported.  Note that the previously set idle handler will
         * continue to be used on platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i,
                                       pr->power.states[i].type);
                printk(")\n");

                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Unable to create '%s' fs entry\n",
                                  ACPI_PROCESSOR_FILE_POWER));
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        pr->flags.power_setup_done = 1;

        return_VALUE(0);
}

int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
        }

        return_VALUE(0);
}