/*
 *   (c) 2003, 2004, 2005 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Support : mark.langsdorf@amd.com
 *
 *  Based on the powernow-k7.c module written by Dave Jones.
 *  (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
 *  (C) 2004 Dominik Brodowski <linux@brodo.de>
 *  (C) 2004 Pavel Machek <pavel@suse.cz>
 *  Licensed under the terms of the GNU GPL License version 2.
 *  Based upon datasheets & sample CPUs kindly provided by AMD.
 *
 *  Valuable input gratefully received from Dave Jones, Pavel Machek,
 *  Dominik Brodowski, and others.
 *  Originally developed by Paul Devriendt.
 *  Processor information obtained from Chapter 9 (Power and Thermal Management)
 *  of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 *  Opteron Processors" available for download from www.amd.com
 *
 *  Tables for specific CPUs can be inferred from
 *  http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>

#include <asm/msr.h>
#include <asm/io.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_POWERNOW_K8_ACPI
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif

#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
#define VERSION "version 1.50.3"
#include "powernow-k8.h"

/* serialize freq changes */
static DECLARE_MUTEX(fidvid_sem);

static struct powernow_k8_data *powernow_data[NR_CPUS];

#ifndef CONFIG_SMP
static cpumask_t cpu_core_map[1];
#endif

/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
	return 800 + (fid * 100);
}

/* Return a frequency in KHz, given an input fid */
static u32 find_khz_freq_from_fid(u32 fid)
{
	return 1000 * find_freq_from_fid(fid);
}
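
/*
 * Example of the fid encoding used by the two helpers above:
 * fid 0x0 -> 800 MHz, fid 0x2 -> 1000 MHz, fid 0xa (10) ->
 * 800 + 10 * 100 = 1800 MHz, i.e. 1800000 kHz from find_khz_freq_from_fid().
 */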

/* Return a voltage in millivolts, given an input vid */
static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
{
	return 1550 - vid * 25;
}
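
/*
 * Example: vid 0x00 is the 1550 mV ceiling and each vid step drops 25 mV,
 * so vid 0x1e (30) gives 1550 - 30 * 25 = 800 mV.
 */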

/* Return the vco fid for an input fid
 *
 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
 * only from corresponding high fids. This returns "high" fid corresponding to
 * "low" one.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid < HI_FID_TABLE_BOTTOM) {
		return 8 + (2 * fid);
	} else {
		return fid;
	}
}
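
/*
 * Example (for whatever value powernow-k8.h defines for HI_FID_TABLE_BOTTOM):
 * a "low" fid such as 0x2 maps to vco fid 8 + 2 * 2 = 0xc, while any fid
 * already at or above HI_FID_TABLE_BOTTOM maps to itself.
 */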

/*
 * Return 1 if the pending bit is set. Unless we just instructed the processor
 * to transition to a new state, seeing this bit set is really bad news.
 */
static int pending_bit_stuck(void)
{
	u32 lo, hi;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
}

/*
 * Update the global current fid / vid values from the status msr.
 * Returns 1 on error.
 */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
	u32 lo, hi;
	u32 i = 0;

	do {
		if (i++ > 0x1000000) {
			printk(KERN_ERR PFX "detected change pending stuck\n");
			return 1;
		}
		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;

	return 0;
}

/* the isochronous relief time */
static void count_off_irt(struct powernow_k8_data *data)
{
	udelay((1 << data->irt) * 10);
	return;
}

/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
	udelay(data->vstable * VST_UNITS_20US);
	return;
}

/* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)
{
	u32 lo, hi;
	u8 fid, vid;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
}

/* write the new fid value along with the other control fields to the msr */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
	u32 lo;
	u32 savevid = data->currvid;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on fid write\n");
		return 1;
	}

	lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;

	dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);

	if (query_current_values_with_pending_wait(data))
		return 1;

	count_off_irt(data);

	if (savevid != data->currvid) {
		printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n",
		       savevid, data->currvid);
		return 1;
	}

	if (fid != data->currfid) {
		printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
		       data->currfid);
		return 1;
	}

	return 0;
}

/* Write a new vid to the hardware */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
	u32 lo;
	u32 savefid = data->currfid;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on vid write\n");
		return 1;
	}

	lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;

	dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
		       savefid, data->currfid);
		return 1;
	}

	if (vid != data->currvid) {
		printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid,
		       data->currvid);
		return 1;
	}

	return 0;
}

/*
 * Reduce the vid by the max of step or reqvid.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step)
{
	if ((data->currvid - reqvid) > step)
		reqvid = data->currvid - step;

	if (write_new_vid(data, reqvid))
		return 1;

	count_off_vst(data);

	return 0;
}
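
/*
 * Example: with currvid 0x12, reqvid 0x0a and step 0x02,
 * decrease_vid_code_by_step() clamps the request to 0x10 (one step of
 * 2 vid codes, i.e. 50 mV higher voltage), writes it, then waits out the
 * voltage stabilization time; callers loop until currvid reaches the target.
 */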

/* Change the fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid)
{
	if (core_voltage_pre_transition(data, reqvid))
		return 1;

	if (core_frequency_transition(data, reqfid))
		return 1;

	if (core_voltage_post_transition(data, reqvid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
			smp_processor_id(),
			reqfid, reqvid, data->currfid, data->currvid);
		return 1;
	}

	dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);

	return 0;
}
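
/*
 * The phase split above mirrors the sequence implemented below: phase 1
 * only ever lowers vid codes (raises the voltage, including the rvo
 * headroom), phase 2 walks the fid toward the target two vco steps at a
 * time, and phase 3 settles the voltage at its final requested value.
 * Each phase re-reads the status MSR and bails out if the other quantity
 * moved unexpectedly.
 */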

/* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid)
{
	u32 rvosteps = data->rvo;
	u32 savefid = data->currfid;
	u32 maxvid, lo;

	dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqvid, data->rvo);

	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	dprintk("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */
		reqvid = maxvid;

	while (data->currvid > reqvid) {
		dprintk("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
			return 1;
	}

	while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {
			rvosteps = 0;
		} else {
			dprintk("ph1: changing vid for rvo, req 0x%x\n",
				data->currvid - 1);
			if (decrease_vid_code_by_step(data, data->currvid - 1, 1))
				return 1;
			rvosteps--;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid);
		return 1;
	}

	dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
	u32 vcoreqfid, vcocurrfid, vcofiddiff, savevid = data->currvid;

	if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
		printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n",
			reqfid, data->currfid);
		return 1;
	}

	if (data->currfid == reqfid) {
		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid);
		return 0;
	}

	dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqfid);

	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;

	while (vcofiddiff > 2) {
		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data, data->currfid + 2)) {
					return 1;
				}
			} else {
				if (write_new_fid
				    (data, 2 + convert_fid_to_vco_fid(data->currfid))) {
					return 1;
				}
			}
		} else {
			if (write_new_fid(data, data->currfid - 2))
				return 1;
		}

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;
	}

	if (write_new_fid(data, reqfid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (data->currfid != reqfid) {
		printk(KERN_ERR PFX
			"ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);
		return 1;
	}

	if (savevid != data->currvid) {
		printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
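
/*
 * Example walk through the phase 2 loop, assuming both fids sit in the
 * "high" table (i.e. above LO_FID_TABLE_TOP): moving from currfid 0x8 to
 * reqfid 0x10 writes intermediate fids 0xa, 0xc and 0xe, two vco steps at
 * a time, and the final write_new_fid(data, reqfid) lands on 0x10.
 */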

/* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid)
{
	u32 savefid = data->currfid;
	u32 savereqvid = reqvid;

	dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid);

	if (reqvid != data->currvid) {
		if (write_new_vid(data, reqvid))
			return 1;

		if (savefid != data->currfid) {
			printk(KERN_ERR PFX
			       "ph3: bad fid change, save 0x%x, curr 0x%x\n",
			       savefid, data->currfid);
			return 1;
		}

		if (data->currvid != reqvid) {
			printk(KERN_ERR PFX
			       "ph3: failed vid transition, req 0x%x, curr 0x%x\n",
			       reqvid, data->currvid);
			return 1;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savereqvid != data->currvid) {
		dprintk("ph3 failed, currvid 0x%x\n", data->currvid);
		return 1;
	}

	if (savefid != data->currfid) {
		dprintk("ph3 failed, currfid changed 0x%x\n",
			data->currfid);
		return 1;
	}

	dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}

static int check_supported_cpu(unsigned int cpu)
{
	cpumask_t oldmask = CPU_MASK_ALL;
	u32 eax, ebx, ecx, edx;
	unsigned int rc = 0;

	oldmask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));
	schedule();

	if (smp_processor_id() != cpu) {
		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
		goto out;
	}

	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto out;

	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
	    ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
	    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_F)) {
		printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
		goto out;
	}

	eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
	if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
		printk(KERN_INFO PFX
		       "No frequency change capabilities detected\n");
		goto out;
	}

	cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
	if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) {
		printk(KERN_INFO PFX "Power state transitions not supported\n");
		goto out;
	}

	rc = 1;

out:
	set_cpus_allowed(current, oldmask);
	schedule();
	return rc;
}
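
/*
 * Note on the checks above: the signature test keeps the driver on AMD K8
 * parts (extended family/model as encoded by CPUID_XFAM_K8 and, as the
 * macro name suggests, no newer than CPUID_XMOD_REV_F), and the
 * CPUID_FREQ_VOLT_CAPABILITIES leaf must advertise the
 * P_STATE_TRANSITION_CAPABLE bit before any fid/vid writes are attempted.
 */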

static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
{
	unsigned int j;
	u8 lastfid = 0xff;

	for (j = 0; j < data->numps; j++) {
		if (pst[j].vid > LEAST_VID) {
			printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid);
			return -EINVAL;
		}
		if (pst[j].vid < data->rvo) {	/* vid + rvo >= 0 */
			printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (pst[j].vid < maxvid + data->rvo) {	/* vid + rvo >= maxvid */
			printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if ((pst[j].fid > MAX_FID)
		    || (pst[j].fid & 1)
		    || (j && (pst[j].fid < HI_FID_TABLE_BOTTOM))) {
			/* Only first fid is allowed to be in "low" range */
			printk(KERN_ERR PFX "two low fids - %d : 0x%x\n", j, pst[j].fid);
			return -EINVAL;
		}
		if (pst[j].fid < lastfid)
			lastfid = pst[j].fid;
	}

	if (lastfid & 1) {
		printk(KERN_ERR PFX "lastfid invalid\n");
		return -EINVAL;
	}

	if (lastfid > LO_FID_TABLE_TOP)
		printk(KERN_INFO PFX "first fid not from lo freq table\n");

	return 0;
}

static void print_basics(struct powernow_k8_data *data)
{
	int j;

	for (j = 0; j < data->numps; j++) {
		if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID)
			printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x (%d mV)\n", j,
				data->powernow_table[j].index & 0xff,
				data->powernow_table[j].frequency/1000,
				data->powernow_table[j].index >> 8,
				find_millivolts_from_vid(data, data->powernow_table[j].index >> 8));
	}

	if (data->batps)
		printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps);
}

static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
{
	struct cpufreq_frequency_table *powernow_table;
	unsigned int j;

	if (data->batps) {	/* use ACPI support to get full speed on mains power */
		printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range)\n", data->batps);
		data->numps = data->batps;
	}

	for (j = 1; j < data->numps; j++) {
		if (pst[j-1].fid >= pst[j].fid) {
			printk(KERN_ERR PFX "PST out of sequence\n");
			return -EINVAL;
		}
	}

	if (data->numps < 2) {
		printk(KERN_ERR PFX "no p states to transition\n");
		return -ENODEV;
	}

	if (check_pst_table(data, pst, maxvid))
		return -EINVAL;

	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->numps + 1)), GFP_KERNEL);
	if (!powernow_table) {
		printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
		return -ENOMEM;
	}

	for (j = 0; j < data->numps; j++) {
		powernow_table[j].index = pst[j].fid; /* lower 8 bits */
		powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
		powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid);
	}
	powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->numps].index = 0;

	if (query_current_values_with_pending_wait(data)) {
		kfree(powernow_table);
		return -EIO;
	}

	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
	print_basics(data);

	for (j = 0; j < data->numps; j++)
		if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
			return 0;

	dprintk("currfid/vid do not match PST, ignoring\n");
	return 0;
}
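
/*
 * Layout note: each cpufreq table entry packs the fid into the low 8 bits
 * of .index and the vid into the high 8 bits, which is why
 * transition_frequency() later recovers them with "index & 0xFF" and
 * "(index & 0xFF00) >> 8".
 */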

/* Find and validate the PSB/PST table in BIOS. */
static int find_psb_table(struct powernow_k8_data *data)
{
	struct psb_s *psb;
	unsigned int i;
	u32 mvs;
	u8 maxvid;
	u32 cpst = 0;
	u32 thiscpuid;

	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
		/* Scan BIOS looking for the signature. */
		/* It can not be at ffff0 - it is too big. */

		psb = phys_to_virt(i);
		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
			continue;

		dprintk("found PSB header at 0x%p\n", psb);

		dprintk("table vers: 0x%x\n", psb->tableversion);
		if (psb->tableversion != PSB_VERSION_1_4) {
			printk(KERN_INFO BFX "PSB table is not v1.4\n");
			return -ENODEV;
		}

		dprintk("flags: 0x%x\n", psb->flags1);
		if (psb->flags1) {
			printk(KERN_ERR BFX "unknown flags\n");
			return -ENODEV;
		}

		data->vstable = psb->vstable;
		dprintk("voltage stabilization time: %d(*20us)\n", data->vstable);

		dprintk("flags2: 0x%x\n", psb->flags2);
		data->rvo = psb->flags2 & 3;
		data->irt = ((psb->flags2) >> 2) & 3;
		mvs = ((psb->flags2) >> 4) & 3;
		data->vidmvs = 1 << mvs;
		data->batps = ((psb->flags2) >> 6) & 3;

		dprintk("ramp voltage offset: %d\n", data->rvo);
		dprintk("isochronous relief time: %d\n", data->irt);
		dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);

		dprintk("numpst: 0x%x\n", psb->num_tables);
		cpst = psb->num_tables;
		if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0)) {
			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
			if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0)) {
				cpst = 1;
			}
		}
		if (cpst != 1) {
			printk(KERN_ERR BFX "numpst must be 1\n");
			return -ENODEV;
		}

		data->plllock = psb->plllocktime;
		dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
		dprintk("maxfid: 0x%x\n", psb->maxfid);
		dprintk("maxvid: 0x%x\n", psb->maxvid);
		maxvid = psb->maxvid;

		data->numps = psb->numps;
		dprintk("numpstates: 0x%x\n", data->numps);
		return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
	}
	/*
	 * If you see this message, complain to BIOS manufacturer. If
	 * he tells you "we do not support Linux" or some similar
	 * nonsense, remember that Windows 2000 uses the same legacy
	 * mechanism that the old Linux PSB driver uses. Tell them it
	 * is broken with Windows 2000.
	 *
	 * The reference to the AMD documentation is chapter 9 in the
	 * BIOS and Kernel Developer's Guide, which is available on
	 * www.amd.com
	 */
	printk(KERN_INFO PFX "BIOS error - no PSB or ACPI _PSS objects\n");
	return -ENODEV;
}
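
/*
 * The PSB path above is the legacy, BIOS-resident fallback: it linearly
 * scans the 0xc0000-0xffff0 region for the PSB signature and hands the
 * per-state PST entries that follow the header to fill_powernow_table().
 * As the cpu_init code notes below, it is only used when ACPI _PSS data
 * is unavailable, and only on uniprocessor configurations.
 */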

#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
{
	if (!data->acpi_data.state_count)
		return;

	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
}
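
/*
 * In other words, the ACPI _PSS "control" value for a P-state is treated
 * as a bitfield: the isochronous relief time, ramp voltage offset,
 * extended-encoding flag, PLL lock time, maximum voltage step and voltage
 * stabilization time are each extracted with the shift/mask pairs defined
 * in powernow-k8.h.
 */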

static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
	int i;
	int cntlofreq = 0;
	struct cpufreq_frequency_table *powernow_table;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		dprintk("register performance failed: bad ACPI data\n");
		return -EIO;
	}

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		dprintk("No ACPI P-States\n");
		goto err_out;
	}

	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
		(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		dprintk("Invalid control/status registers (%x - %x)\n",
			data->acpi_data.control_register.space_id,
			data->acpi_data.status_register.space_id);
		goto err_out;
	}

	/* fill in data->powernow_table */
	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table) {
		dprintk("powernow_table memory alloc failure\n");
		goto err_out;
	}

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 fid;
		u32 vid;

		if (data->exttype) {
			fid = data->acpi_data.states[i].status & FID_MASK;
			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & VID_MASK;
		} else {
			fid = data->acpi_data.states[i].control & FID_MASK;
			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
		}

		dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);

		powernow_table[i].index = fid; /* lower 8 bits */
		powernow_table[i].index |= (vid << 8); /* upper 8 bits */
		powernow_table[i].frequency = find_khz_freq_from_fid(fid);

		/* verify frequency is OK */
		if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) ||
			(powernow_table[i].frequency < (MIN_FREQ * 1000))) {
			dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency);
			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
			continue;
		}

		/* verify voltage is OK - BIOSs are using "off" to indicate invalid */
		if (vid == VID_OFF) {
			dprintk("invalid vid %u, ignoring\n", vid);
			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
			continue;
		}

		/* verify only 1 entry from the lo frequency table */
		if (fid < HI_FID_TABLE_BOTTOM) {
			if (cntlofreq) {
				/* if both entries are the same, ignore this
				 * one...
				 */
				if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
					(powernow_table[i].index != powernow_table[cntlofreq].index)) {
					printk(KERN_ERR PFX "Too many lo freq table entries\n");
					goto err_out_mem;
				}

				dprintk("double low frequency table entry, ignoring it.\n");
				powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
				continue;
			} else
				cntlofreq = i;
		}

		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
				powernow_table[i].frequency,
				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
			continue;
		}
	}

	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->acpi_data.state_count].index = 0;
	data->powernow_table = powernow_table;

	/* fill in data */
	data->numps = data->acpi_data.state_count;
	print_basics(data);
	powernow_k8_acpi_pst_values(data, 0);

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	return 0;

err_out_mem:
	kfree(powernow_table);

err_out:
	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);

	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
	data->acpi_data.state_count = 0;

	return -ENODEV;
}

static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
}

#else
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
#endif /* CONFIG_X86_POWERNOW_K8_ACPI */

/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency(struct powernow_k8_data *data, unsigned int index)
{
	u32 fid;
	u32 vid;
	int res, i;
	struct cpufreq_freqs freqs;

	dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* fid are the lower 8 bits of the index we stored into
	 * the cpufreq frequency table in find_psb_table, vid are
	 * the upper 8 bits.
	 */
	fid = data->powernow_table[index].index & 0xFF;
	vid = (data->powernow_table[index].index & 0xFF00) >> 8;

	dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);

	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((data->currvid == vid) && (data->currfid == fid)) {
		dprintk("target matches current values (fid 0x%x, vid 0x%x)\n",
			fid, vid);
		return 0;
	}

	if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
		printk(KERN_ERR PFX
		       "ignoring illegal change in lo freq table-%x to 0x%x\n",
		       data->currfid, fid);
		return 1;
	}

	dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
		smp_processor_id(), fid, vid);

	freqs.cpu = data->cpu;

	freqs.old = find_khz_freq_from_fid(data->currfid);
	freqs.new = find_khz_freq_from_fid(fid);
	for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	res = transition_fid_vid(data, fid, vid);

	freqs.new = find_khz_freq_from_fid(data->currfid);
	for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return res;
}

/* Driver entry point to switch to the target frequency */
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
{
	cpumask_t oldmask = CPU_MASK_ALL;
	struct powernow_k8_data *data = powernow_data[pol->cpu];
	u32 checkfid = data->currfid;
	u32 checkvid = data->currvid;
	unsigned int newstate;
	int ret = -EIO;
	int i;

	/* only run on specific CPU from here on */
	oldmask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
	schedule();

	if (smp_processor_id() != pol->cpu) {
		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
		goto err_out;
	}

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
		goto err_out;
	}

	dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
		pol->cpu, targfreq, pol->min, pol->max, relation);

	if (query_current_values_with_pending_wait(data)) {
		ret = -EIO;
		goto err_out;
	}

	dprintk("targ: curr fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);

	if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
		printk(KERN_INFO PFX
		       "error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
		       checkfid, data->currfid, checkvid, data->currvid);
	}

	if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
		goto err_out;

	down(&fidvid_sem);

	powernow_k8_acpi_pst_values(data, newstate);

	if (transition_frequency(data, newstate)) {
		printk(KERN_ERR PFX "transition frequency failed\n");
		ret = 1;
		up(&fidvid_sem);
		goto err_out;
	}

	/* Update all the fid/vids of our siblings */
	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
		powernow_data[i]->currvid = data->currvid;
		powernow_data[i]->currfid = data->currfid;
	}
	up(&fidvid_sem);

	pol->cur = find_khz_freq_from_fid(data->currfid);
	ret = 0;

err_out:
	set_cpus_allowed(current, oldmask);
	schedule();

	return ret;
}

/* Driver entry point to verify the policy and range of frequencies */
static int powernowk8_verify(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = powernow_data[pol->cpu];

	return cpufreq_frequency_table_verify(pol, data->powernow_table);
}

/* per CPU init entry point to the driver */
static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data;
	cpumask_t oldmask = CPU_MASK_ALL;
	int rc, i;

	if (!check_supported_cpu(pol->cpu))
		return -ENODEV;

	data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
		return -ENOMEM;
	}

	memset(data, 0, sizeof(struct powernow_k8_data));

	data->cpu = pol->cpu;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
		 * Use the PSB BIOS structure. This is only available on
		 * a UP version, and is deprecated by AMD.
		 */
		if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
			printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
			kfree(data);
			return -ENODEV;
		}
		if (pol->cpu != 0) {
			printk(KERN_ERR PFX "init not cpu 0\n");
			kfree(data);
			return -ENODEV;
		}
		rc = find_psb_table(data);
		if (rc) {
			kfree(data);
			return -ENODEV;
		}
	}

	/* only run on specific CPU from here on */
	oldmask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
	schedule();

	if (smp_processor_id() != pol->cpu) {
		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
		goto err_out;
	}

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing init, change pending bit set\n");
		goto err_out;
	}

	if (query_current_values_with_pending_wait(data))
		goto err_out;

	fidvid_msr_init();

	/* run on any CPU again */
	set_cpus_allowed(current, oldmask);
	schedule();

	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
	pol->cpus = cpu_core_map[pol->cpu];

	/* Take a crude guess here.
	 * That guess was in microseconds, so multiply with 1000 */
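	/* (Reading of the formula below: roughly (rvo + 8) voltage steps of
	 * vstable * VST_UNITS_20US microseconds each, plus three isochronous
	 * relief delays of (1 << irt) * 10 microseconds, with the final
	 * * 1000 converting the microsecond guess to nanoseconds.) */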
	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
		+ (3 * (1 << data->irt) * 10)) * 1000;

	pol->cur = find_khz_freq_from_fid(data->currfid);
	dprintk("policy current frequency %d kHz\n", pol->cur);

	/* min/max the cpu is capable of */
	if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
		printk(KERN_ERR PFX "invalid powernow_table\n");
		powernow_k8_cpu_exit_acpi(data);
		kfree(data->powernow_table);
		kfree(data);
		return -EINVAL;
	}

	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);

	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
	       data->currfid, data->currvid);

	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
		powernow_data[i] = data;
	}

	return 0;

err_out:
	set_cpus_allowed(current, oldmask);
	schedule();
	powernow_k8_cpu_exit_acpi(data);
	kfree(data);

	return -ENODEV;
}

static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = powernow_data[pol->cpu];

	if (!data)
		return -EINVAL;

	powernow_k8_cpu_exit_acpi(data);

	cpufreq_frequency_table_put_attr(pol->cpu);

	kfree(data->powernow_table);
	kfree(data);

	return 0;
}

static unsigned int powernowk8_get (unsigned int cpu)
{
	struct powernow_k8_data *data = powernow_data[cpu];
	cpumask_t oldmask = current->cpus_allowed;
	unsigned int khz = 0;

	set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (smp_processor_id() != cpu) {
		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
		set_cpus_allowed(current, oldmask);
		return 0;
	}
	preempt_disable();

	if (query_current_values_with_pending_wait(data))
		goto out;

	khz = find_khz_freq_from_fid(data->currfid);

out:
	preempt_enable_no_resched();
	set_cpus_allowed(current, oldmask);

	return khz;
}

static struct freq_attr* powernow_k8_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify = powernowk8_verify,
	.target = powernowk8_target,
	.init = powernowk8_cpu_init,
	.exit = __devexit_p(powernowk8_cpu_exit),
	.get = powernowk8_get,
	.name = "powernow-k8",
	.owner = THIS_MODULE,
	.attr = powernow_k8_attr,
};

/* driver entry point for init */
static int __init powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0;

	for (i=0; i<NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		if (check_supported_cpu(i))
			supported_cpus++;
	}

	if (supported_cpus == num_online_cpus()) {
		printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron processors (" VERSION ")\n",
			supported_cpus);
		return cpufreq_register_driver(&cpufreq_amd64_driver);
	}

	return -ENODEV;
}

/* driver entry point for term */
static void __exit powernowk8_exit(void)
{
	dprintk("exit\n");

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
}

MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>.");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");

late_initcall(powernowk8_init);
module_exit(powernowk8_exit);