/* processor_idle.c — idle (Cx) state submodule of the ACPI processor driver */
  1. /*
  2. * processor_idle - idle state submodule to the ACPI processor driver
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  7. * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8. * - Added processor hotplug support
  9. * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  10. * - Added support for C3 on SMP
  11. *
  12. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or (at
  17. * your option) any later version.
  18. *
  19. * This program is distributed in the hope that it will be useful, but
  20. * WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  22. * General Public License for more details.
  23. *
  24. * You should have received a copy of the GNU General Public License along
  25. * with this program; if not, write to the Free Software Foundation, Inc.,
  26. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  27. *
  28. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  29. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME                ("acpi_processor")

/* /proc file name published under each processor device directory */
#define ACPI_PROCESSOR_FILE_POWER	"power"

/* Convert microseconds to ACPI PM timer ticks (~3.579545 MHz timer). */
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */

/* Previous pm_idle handler, restored when this module stops managing idle. */
static void (*pm_idle_save)(void);

/* max_cstate is declared elsewhere in the processor driver (extern);
 * here we only expose it as a writable module parameter. */
module_param(max_cstate, uint, 0644);

/* nocst != 0 disables use of the ACPI _CST method entirely. */
static unsigned int nocst = 0;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history = (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
  65. /* --------------------------------------------------------------------------
  66. Power Management
  67. -------------------------------------------------------------------------- */
  68. /*
  69. * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  70. * For now disable this. Probably a bug somewhere else.
  71. *
  72. * To skip this limit, boot/load with a large max_cstate limit.
  73. */
/*
 * DMI callback: clamp max_cstate for machines known to crash in deep
 * C-states.  id->driver_data carries the highest safe C-state.  A user
 * who boots with a max_cstate above ACPI_PROCESSOR_MAX_POWER explicitly
 * overrides this quirk (first check below).
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
		" Override with \"processor.max_cstate=%d\"\n", id->ident,
		(long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* DMI blacklist of systems whose BIOS advertises unsafe deep C-states;
 * matching entries invoke set_max_cstate() with the per-machine limit. */
static struct dmi_system_id __initdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }, (void*)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307") },
	  (void*)2},
	{},
};
  97. static inline u32
  98. ticks_elapsed (
  99. u32 t1,
  100. u32 t2)
  101. {
  102. if (t2 >= t1)
  103. return (t2 - t1);
  104. else if (!acpi_fadt.tmr_val_ext)
  105. return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
  106. else
  107. return ((0xFFFFFFFF - t1) + t2);
  108. }
  109. static void
  110. acpi_processor_power_activate (
  111. struct acpi_processor *pr,
  112. struct acpi_processor_cx *new)
  113. {
  114. struct acpi_processor_cx *old;
  115. if (!pr || !new)
  116. return;
  117. old = pr->power.state;
  118. if (old)
  119. old->promotion.count = 0;
  120. new->demotion.count = 0;
  121. /* Cleanup from old state. */
  122. if (old) {
  123. switch (old->type) {
  124. case ACPI_STATE_C3:
  125. /* Disable bus master reload */
  126. if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
  127. acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
  128. break;
  129. }
  130. }
  131. /* Prepare to use new state. */
  132. switch (new->type) {
  133. case ACPI_STATE_C3:
  134. /* Enable bus master reload */
  135. if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
  136. acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
  137. break;
  138. }
  139. pr->power.state = new;
  140. return;
  141. }
  142. static atomic_t c3_cpu_count;
/*
 * acpi_processor_idle - pm_idle handler installed by this module.
 *
 * Runs with this CPU otherwise idle: samples bus-master activity,
 * enters the currently selected Cx state, measures the time spent
 * asleep via the PM timer, and applies the promotion/demotion policy
 * to pick the state for the next idle period.  Interrupt enable/disable
 * ordering below is load-bearing — C2/C3 entry and the PM timer reads
 * must happen with interrupts off.
 */
static void acpi_processor_idle (void)
{
	struct acpi_processor	*pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int			sleep_ticks = 0;
	u32			t1, t2 = 0;

	pr = processors[_smp_processor_id()];
	if (!pr)
		return;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx)
		goto easy_out;

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32		bm_status = 0;
		unsigned long	diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 32)
			diff = 32;

		/* Shift the history mask once per elapsed jiffy; jiffies we
		 * slept through (all but the most recent) are assumed busy. */
		while (diff) {
			/* if we didn't get called, assume there was busmaster activity */
			diff--;
			if (diff)
				pr->power.bm_activity |= 0x1;
			pr->power.bm_activity <<= 1;
		}

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
			&bm_status, ACPI_MTX_DO_NOT_LOCK);
		if (bm_status) {
			pr->power.bm_activity++;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
				1, ACPI_MTX_DO_NOT_LOCK);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
				|| (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity++;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * Apply bus mastering demotion policy.  Automatically demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * funciton) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) istead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

	cx->usage++;

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			safe_halt();
		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C2: reading the P_LVL2 I/O port enters the state */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL2 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		break;

	case ACPI_STATE_C3:

		if (pr->flags.bm_check) {
			if (atomic_inc_return(&c3_cpu_count) ==
					num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
					ACPI_MTX_DO_NOT_LOCK);
			}
		} else {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C3: reading the P_LVL3 I/O port enters the state */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL3 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		if (pr->flags.bm_check) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
		}

		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		break;

	default:
		local_irq_enable();
		return;
	}

	next_state = pr->power.state;

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >= cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				}
				else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

end:
	/*
	 * Demote if current state exceeds max_cstate
	 */
	if ((pr->power.state - pr->power.states) > max_cstate) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);

	return;

easy_out:
	/* do C1 instead of busy loop */
	if (pm_idle_save)
		pm_idle_save();
	else
		safe_halt();
	return;
}
/*
 * acpi_processor_set_power_policy - build the default promotion and
 * demotion links between the valid Cx states of one processor.
 *
 * Picks the shallowest valid state as the startup state, then wires
 * each valid state to its nearest valid neighbors: demotion targets
 * (toward C1) and promotion targets (toward C3), with per-state
 * hysteresis thresholds.  Returns 0, -EINVAL on NULL pr, or -ENODEV
 * when no valid state exists.
 */
static int
acpi_processor_set_power_policy (
	struct acpi_processor	*pr)
{
	unsigned int		i;
	unsigned int		state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

	if (!pr)
		return_VALUE(-EINVAL);

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state: first valid state (skipping the C0 filler) */
	for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return_VALUE(-ENODEV);

	/* demotion: each valid state demotes to the previous valid one */
	for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion: each valid state promotes to the next valid one;
	 * deeper targets require more consecutive "long" sleeps */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state  = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

 	return_VALUE(0);
}
  432. static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr)
  433. {
  434. int i;
  435. ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
  436. if (!pr)
  437. return_VALUE(-EINVAL);
  438. if (!pr->pblk)
  439. return_VALUE(-ENODEV);
  440. for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
  441. memset(pr->power.states, 0, sizeof(struct acpi_processor_cx));
  442. /* if info is obtained from pblk/fadt, type equals state */
  443. pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
  444. pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
  445. pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
  446. /* the C0 state only exists as a filler in our array,
  447. * and all processors need to support C1 */
  448. pr->power.states[ACPI_STATE_C0].valid = 1;
  449. pr->power.states[ACPI_STATE_C1].valid = 1;
  450. /* determine C2 and C3 address from pblk */
  451. pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
  452. pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
  453. /* determine latencies from FADT */
  454. pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
  455. pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;
  456. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  457. "lvl2[0x%08x] lvl3[0x%08x]\n",
  458. pr->power.states[ACPI_STATE_C2].address,
  459. pr->power.states[ACPI_STATE_C3].address));
  460. return_VALUE(0);
  461. }
  462. static int acpi_processor_get_power_info_default_c1 (struct acpi_processor *pr)
  463. {
  464. int i;
  465. ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
  466. for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
  467. memset(&(pr->power.states[i]), 0,
  468. sizeof(struct acpi_processor_cx));
  469. /* if info is obtained from pblk/fadt, type equals state */
  470. pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
  471. pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
  472. pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
  473. /* the C0 state only exists as a filler in our array,
  474. * and all processors need to support C1 */
  475. pr->power.states[ACPI_STATE_C0].valid = 1;
  476. pr->power.states[ACPI_STATE_C1].valid = 1;
  477. return_VALUE(0);
  478. }
/*
 * acpi_processor_get_power_info_cst - evaluate the _CST AML method and
 * fill pr->power.states[] from the returned package.
 *
 * _CST returns Package { count, Package{reg, type, latency, power}... }.
 * Malformed sub-packages are silently skipped.  Returns 0 on success,
 * -ENODEV when _CST is disabled/absent or fewer than 2 states were
 * found, -EFAULT on a structurally invalid package.
 *
 * NOTE(review): 'status' is an acpi_status (unsigned) but is assigned
 * negative errno values below and returned through return_VALUE — this
 * relies on the implicit conversion round-tripping; verify against the
 * caller's expectations.
 */
static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
{
	acpi_status		status = 0;
	acpi_integer		count;
	int			i;
	struct acpi_buffer	buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object	*cst;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

	if (nocst)
		return_VALUE(-ENODEV);

	pr->power.count = 0;
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&(pr->power.states[i]), 0,
			sizeof(struct acpi_processor_cx));

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return_VALUE(-ENODEV);
	}

	cst = (union acpi_object *) buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n"));
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n"));
		status = -EFAULT;
		goto end;
	}

	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
	if (count > ACPI_PROCESSOR_MAX_POWER) {
		printk(KERN_WARNING "Limiting number of power states to max (%d)\n", ACPI_PROCESSOR_MAX_POWER);
		printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
		count = ACPI_PROCESSOR_MAX_POWER;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = (union acpi_object *) &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		/* element[0]: the register buffer describing how to enter
		 * this state (I/O port or FFH/native instruction) */
		obj = (union acpi_object *) &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *) obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			(reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
			0 : reg->address;

		/* element[1]: the C-state type (C1..C3) */
		/* There should be an easy way to extract an integer... */
		obj = (union acpi_object *) &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;

		/* only C1 may use fixed hardware (FFH) entry */
		if ((cx.type != ACPI_STATE_C1) &&
		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;

		if ((cx.type < ACPI_STATE_C1) ||
		    (cx.type > ACPI_STATE_C3))
			continue;

		/* element[2]: worst-case exit latency in microseconds */
		obj = (union acpi_object *) &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		/* element[3]: average power consumption in mW */
		obj = (union acpi_object *) &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		(pr->power.count)++;
		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count));

	/* Validate number of power states discovered */
	if (pr->power.count < 2)
		status = -ENODEV;

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(status);
}
  570. static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
  571. {
  572. ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");
  573. if (!cx->address)
  574. return_VOID;
  575. /*
  576. * C2 latency must be less than or equal to 100
  577. * microseconds.
  578. */
  579. else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
  580. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  581. "latency too large [%d]\n",
  582. cx->latency));
  583. return_VOID;
  584. }
  585. /*
  586. * Otherwise we've met all of our C2 requirements.
  587. * Normalize the C2 latency to expidite policy
  588. */
  589. cx->valid = 1;
  590. cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
  591. return_VOID;
  592. }
  593. static void acpi_processor_power_verify_c3(
  594. struct acpi_processor *pr,
  595. struct acpi_processor_cx *cx)
  596. {
  597. static int bm_check_flag;
  598. ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");
  599. if (!cx->address)
  600. return_VOID;
  601. /*
  602. * C3 latency must be less than or equal to 1000
  603. * microseconds.
  604. */
  605. else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
  606. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  607. "latency too large [%d]\n",
  608. cx->latency));
  609. return_VOID;
  610. }
  611. /*
  612. * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
  613. * DMA transfers are used by any ISA device to avoid livelock.
  614. * Note that we could disable Type-F DMA (as recommended by
  615. * the erratum), but this is known to disrupt certain ISA
  616. * devices thus we take the conservative approach.
  617. */
  618. else if (errata.piix4.fdma) {
  619. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  620. "C3 not supported on PIIX4 with Type-F DMA\n"));
  621. return_VOID;
  622. }
  623. /* All the logic here assumes flags.bm_check is same across all CPUs */
  624. if (!bm_check_flag) {
  625. /* Determine whether bm_check is needed based on CPU */
  626. acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
  627. bm_check_flag = pr->flags.bm_check;
  628. } else {
  629. pr->flags.bm_check = bm_check_flag;
  630. }
  631. if (pr->flags.bm_check) {
  632. /* bus mastering control is necessary */
  633. if (!pr->flags.bm_control) {
  634. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  635. "C3 support requires bus mastering control\n"));
  636. return_VOID;
  637. }
  638. } else {
  639. /*
  640. * WBINVD should be set in fadt, for C3 state to be
  641. * supported on when bm_check is not required.
  642. */
  643. if (acpi_fadt.wb_invd != 1) {
  644. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  645. "Cache invalidation should work properly"
  646. " for C3 to be enabled on SMP systems\n"));
  647. return_VOID;
  648. }
  649. acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
  650. 0, ACPI_MTX_DO_NOT_LOCK);
  651. }
  652. /*
  653. * Otherwise we've met all of our C3 requirements.
  654. * Normalize the C3 latency to expidite policy. Enable
  655. * checking of bus mastering status (bm_check) so we can
  656. * use this in our C3 policy
  657. */
  658. cx->valid = 1;
  659. cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
  660. return_VOID;
  661. }
  662. static int acpi_processor_power_verify(struct acpi_processor *pr)
  663. {
  664. unsigned int i;
  665. unsigned int working = 0;
  666. for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
  667. struct acpi_processor_cx *cx = &pr->power.states[i];
  668. switch (cx->type) {
  669. case ACPI_STATE_C1:
  670. cx->valid = 1;
  671. break;
  672. case ACPI_STATE_C2:
  673. acpi_processor_power_verify_c2(cx);
  674. break;
  675. case ACPI_STATE_C3:
  676. acpi_processor_power_verify_c3(pr, cx);
  677. break;
  678. }
  679. if (cx->valid)
  680. working++;
  681. }
  682. return (working);
  683. }
/*
 * acpi_processor_get_power_info - discover and set up Cx states for
 * one processor.
 *
 * Tries _CST first, falls back to FADT/P_BLK, then to C1-only; a
 * source is accepted only if it yields at least two verified states.
 * Then installs the default promotion/demotion policy and marks the
 * CPU "idle manageable" (flags.power) when any state beyond the C0
 * filler is valid.
 */
static int acpi_processor_get_power_info (
	struct acpi_processor	*pr)
{
	unsigned int		i;
	int			result;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

	/* NOTE: the idle thread may not be running while calling
	 * this function */
	result = acpi_processor_get_power_info_cst(pr);
	if ((result) || (acpi_processor_power_verify(pr) < 2)) {
		result = acpi_processor_get_power_info_fadt(pr);
		if ((result) || (acpi_processor_power_verify(pr) < 2))
			result = acpi_processor_get_power_info_default_c1(pr);
	}

	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return_VALUE(result);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return_VALUE(0);
}
  721. int acpi_processor_cst_has_changed (struct acpi_processor *pr)
  722. {
  723. int result = 0;
  724. ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
  725. if (!pr)
  726. return_VALUE(-EINVAL);
  727. if ( nocst) {
  728. return_VALUE(-ENODEV);
  729. }
  730. if (!pr->flags.power_setup_done)
  731. return_VALUE(-ENODEV);
  732. /* Fall back to the default idle loop */
  733. pm_idle = pm_idle_save;
  734. synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
  735. pr->flags.power = 0;
  736. result = acpi_processor_get_power_info(pr);
  737. if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
  738. pm_idle = acpi_processor_idle;
  739. return_VALUE(result);
  740. }
/* proc interface */

/*
 * acpi_processor_power_seq_show - render /proc/acpi/.../power: the
 * active state, max_cstate, BM activity mask, and one line per Cx
 * state with its type, promotion/demotion links, latency, and usage.
 */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor	*pr = (struct acpi_processor *)seq->private;
	unsigned int		i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
			"max_cstate:              C%d\n"
			"bus master activity:     %08x\n",
			pr->power.state ? pr->power.state - pr->power.states : 0,
			max_cstate,
			(unsigned)pr->power.bm_activity);

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		/* '*' marks the currently active state */
		seq_printf(seq, "   %cC%d:                  ",
			(&pr->power.states[i] == pr->power.state?'*':' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				(pr->power.states[i].promotion.state -
				pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				(pr->power.states[i].demotion.state -
				pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d]\n",
			pr->power.states[i].latency,
			pr->power.states[i].usage);
	}

end:
	return_VALUE(0);
}
  796. static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
  797. {
  798. return single_open(file, acpi_processor_power_seq_show,
  799. PDE(inode)->data);
  800. }
/* File operations for the per-processor /proc "power" entry. */
static struct file_operations acpi_processor_power_fops = {
	.open 		= acpi_processor_power_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * acpi_processor_power_init - per-processor power management setup.
 *
 * On the first call applies DMI quirks; then notifies the BIOS of
 * _CST support (via the FADT SMI command, unless nocst), evaluates
 * _PDC, discovers the Cx states, installs acpi_processor_idle as the
 * system idle handler (processor #0 only), and creates the /proc
 * "power" entry.  Returns 0 or -EINVAL on NULL pr.
 */
int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device)
{
	acpi_status		status = 0;
	static int		first_run = 0;
	struct proc_dir_entry	*entry = NULL;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_init");

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", max_cstate);
		first_run++;
	}

	if (!pr)
		return_VALUE(-EINVAL);

	if (acpi_fadt.cst_cnt && !nocst) {
		/* Tell the BIOS we will use _CST (FADT-defined SMI command). */
		status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Notifying BIOS of _CST ability failed\n"));
		}
	}

	acpi_processor_power_init_pdc(&(pr->power), pr->id);
	acpi_processor_set_pdc(pr, pr->power.pdc);
	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that we use previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i, pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
		S_IRUGO, acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
			"Unable to create '%s' fs entry\n",
			ACPI_PROCESSOR_FILE_POWER));
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return_VALUE(0);
}
/*
 * acpi_processor_power_exit - per-processor power management teardown:
 * remove the /proc entry and, for processor #0, restore the original
 * pm_idle handler.  Always returns 0.
 */
int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle), Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
	}

	return_VALUE(0);
}