processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004       Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                          - Added processor hotplug support
 * Copyright (C) 2005       Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                          - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>        /* need_resched() */

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

#define ACPI_PROCESSOR_FILE_POWER       "power"
#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
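
/*
 * Illustrative worked example (not part of the original driver comments):
 * the ACPI PM timer runs at PM_TIMER_FREQUENCY = 3579545 Hz, roughly 3.58
 * ticks per microsecond, so a reported latency of 100 us converts to
 * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 timer ticks.
 */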

static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);

static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
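
/*
 * Illustrative example: with HZ=250 the expression above yields
 * (1U << (250 / 25)) - 1 = 0x3FF, i.e. 10 jiffies of history, and at
 * 4 ms per jiffy that again covers roughly 40 ms of bus-master activity.
 */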

/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET70WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET43WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET45WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET47WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET50WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET52WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET55WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET56WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET59WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET61WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET62WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET64WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET65WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET68WW")}, (void *)1},
        { set_max_cstate, "Medion 41700", {
          DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")}, (void *)1},
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
          (void *)2},
        {},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!acpi_fadt.tmr_val_ext)
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
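
/*
 * Illustrative example of the wrap-around case above: with a 24-bit PM
 * timer (acpi_fadt.tmr_val_ext == 0), t1 = 0x00FFFF00 and t2 = 0x00000200
 * give ((0x00FFFFFF - 0x00FFFF00) + 0x200) & 0x00FFFFFF = 0x2FF, i.e.
 * roughly 767 ticks (~214 us at 3.58 MHz) elapsed across the wrap.
 */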

static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload */
                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
                                          ACPI_MTX_DO_NOT_LOCK);
                break;
        }

        pr->power.state = new;

        return;
}

static void acpi_safe_halt(void)
{
        current_thread_info()->status &= ~TS_POLLING;
        smp_mb__after_clear_bit();
        if (!need_resched())
                safe_halt();
        current_thread_info()->status |= TS_POLLING;
}
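
/*
 * Illustrative note on the TS_POLLING dance above: clearing TS_POLLING
 * tells remote CPUs that this CPU is no longer polling need_resched(), so
 * a wakeup must send a reschedule interrupt that can break it out of halt.
 * The barrier plus the need_resched() recheck close the window in which a
 * wakeup could arrive after the flag is cleared but before the CPU halts.
 */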

static atomic_t c3_cpu_count;

static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        pr = processors[smp_processor_id()];
        if (!pr)
                return;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx) {
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();
                return;
        }

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 32)
                        diff = 32;

                while (diff) {
                        /* if we didn't get called, assume there was busmaster activity */
                        diff--;
                        if (diff)
                                pr->power.bm_activity |= 0x1;
                        pr->power.bm_activity <<= 1;
                }

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                  &bm_status, ACPI_MTX_DO_NOT_LOCK);
                if (bm_status) {
                        pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                          1, ACPI_MTX_DO_NOT_LOCK);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity, forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity++;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fall back to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.  We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !acpi_fadt.plvl2_up)
                cx = &pr->power.states[ACPI_STATE_C1];
#endif

        cx->usage++;

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
                current_thread_info()->status &= ~TS_POLLING;
                smp_mb__after_clear_bit();
                if (need_resched()) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
                        return;
                }
        }

        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();

                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
                inb(cx->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);

#ifdef CONFIG_GENERIC_TIME
                /* TSC halts in C2, so notify users */
                mark_tsc_unstable();
#endif
                /* Re-enable interrupts */
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                break;

        case ACPI_STATE_C3:

                if (pr->flags.bm_check) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        }
                } else {
                        /* SMP with no shared cache... Invalidate cache */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
                inb(cx->address);
                /* Dummy wait op (see above) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                if (pr->flags.bm_check) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
                                          ACPI_MTX_DO_NOT_LOCK);
                }

#ifdef CONFIG_GENERIC_TIME
                /* TSC halts in C3, so notify users */
                mark_tsc_unstable();
#endif
                /* Re-enable interrupts */
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                break;

        default:
                local_irq_enable();
                return;
        }

        next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
        /* Don't do promotion/demotion */
        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !acpi_fadt.plvl2_up) {
                next_state = cx;
                goto end;
        }
#endif

        /*
         * Promotion?
         * ----------
         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity &
                                              cx->promotion.threshold.bm)) {
                                                next_state = cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

      end:
        /*
         * Demote if current state exceeds max_cstate
         */
        if ((pr->power.state - pr->power.states) > max_cstate) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        if (!pr)
                return -EINVAL;

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return -ENODEV;

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return 0;
}
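
/*
 * Illustrative example of the policy the function above builds when C1, C2,
 * and C3 are all valid: the demotion pass links C2 -> C1 and C3 -> C2
 * (demote after a single too-short sleep), while the promotion pass links
 * C1 -> C2 and C2 -> C3 (promote after 10 long sleeps from C1, or 4 from C2,
 * provided recent bus-master activity does not match bm_history when the
 * target state is C3).
 */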

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

        if (!pr)
                return -EINVAL;

        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
{
        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        /* set the first C-State to C1 */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int current_count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        if (nocst)
                return -ENODEV;

        current_count = 1;

        /* Zero initialize C2 onwards and prepare for fresh CST lookup */
        for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0,
                       sizeof(struct acpi_processor_cx));

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = (union acpi_object *)buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
                status = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = (union acpi_object *)&(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = (union acpi_object *)&(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
                    0 : reg->address;

                /* There should be an easy way to extract an integer... */
                obj = (union acpi_object *)&(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;

                if ((cx.type != ACPI_STATE_C1) &&
                    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
                        continue;

                if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3))
                        continue;

                obj = (union acpi_object *)&(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = (union acpi_object *)&(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        printk(KERN_WARNING
                               "Limiting number of power states to max (%d)\n",
                               ACPI_PROCESSOR_MAX_POWER);
                        printk(KERN_WARNING
                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                status = -EFAULT;

      end:
        acpi_os_free(buffer.pointer);

        return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

        if (!cx->address)
                return;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;

        if (!cx->address)
                return;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                /* bus mastering control is necessary */
                if (!pr->flags.bm_control) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "C3 support requires bus mastering control\n"));
                        return;
                }
        } else {
                /*
                 * WBINVD should be set in fadt, for C3 state to be
                 * supported when bm_check is not required.
                 */
                if (acpi_fadt.wb_invd != 1) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
                                  0, ACPI_MTX_DO_NOT_LOCK);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

#ifdef ARCH_APICTIMER_STOPS_ON_C3
        int timer_broadcast = 0;
        cpumask_t mask = cpumask_of_cpu(pr->id);
        on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
#endif

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
#ifdef ARCH_APICTIMER_STOPS_ON_C3
                        /* Some AMD systems fake C3 as C2, but still
                           have timer troubles */
                        if (cx->valid &&
                            boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                                timer_broadcast++;
#endif
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
#ifdef ARCH_APICTIMER_STOPS_ON_C3
                        if (cx->valid)
                                timer_broadcast++;
#endif
                        break;
                }

                if (cx->valid)
                        working++;
        }

#ifdef ARCH_APICTIMER_STOPS_ON_C3
        if (timer_broadcast)
                on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
#endif

        return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Adding C1 state */
        acpi_processor_get_power_info_default_c1(pr);
        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                acpi_processor_get_power_info_fadt(pr);

        pr->power.count = acpi_processor_power_verify(pr);

        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return result;

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;

        if (!pr)
                return -EINVAL;

        if (nocst) {
                return -ENODEV;
        }

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return result;
}

/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        unsigned int i;

        if (!pr)
                goto end;

        seq_printf(seq, "active state: C%zd\n"
                   "max_cstate: C%d\n"
                   "bus master activity: %08x\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity);

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, " %cC%d: ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage);
        }

      end:
        return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

int acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
        }

        if (!pr)
                return -EINVAL;

        if (acpi_fadt.cst_cnt && !nocst) {
                status =
                    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Notifying BIOS of _CST ability failed"));
                }
        }

        acpi_processor_get_power_info(pr);

        /*
         * Install the idle handler if processor power management is supported.
         * Note that the previously set idle handler will be used on
         * platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i,
                                       pr->power.states[i].type);
                printk(")\n");

                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                return -EIO;
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        pr->flags.power_setup_done = 1;

        return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
        }

        return 0;
}