centaur.c

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include "cpu.h"

#ifdef CONFIG_X86_OOSTORE
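/*
 * power2(x) rounds down to the largest power of two that is <= x
 * (returning 0 for x == 0); used to pick power-of-two MCR block sizes.
 */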
static u32 __cpuinit power2(u32 x)
{
	u32 s = 1;
	while (s <= x)
		s <<= 1;
	return s >> 1;
}

/*
 * Set up an actual MCR
 */
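/*
 * As written below, the MCR MSR gets the 4KB-aligned physical base in its
 * high dword and, in the low dword, a mask derived from the power-of-two
 * size with the attribute key in the low 12 bits.
 */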
static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
{
	u32 lo, hi;

	hi = base & ~0xFFF;
	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
	lo &= ~0xFFF;		/* Remove the ctrl value bits */
	lo |= key;		/* Attribute we wish to set */
	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
}

/*
 * Figure out what we can cover with MCRs
 *
 * Shortcut: We know you can't put 4GiB of RAM on a WinChip
 */
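/*
 * ramtop() returns the highest RAM address below 4GB found in the e820
 * map, clipped down to the first reserved region above 1MB so that we
 * never place an MCR over reserved space.
 */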
static u32 __cpuinit ramtop(void)		/* 16388 */
{
	int i;
	u32 top = 0;
	u32 clip = 0xFFFFFFFFUL;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;

		if (e820.map[i].addr > 0xFFFFFFFFUL)
			continue;
		/*
		 * Don't MCR over reserved space. Ignore the ISA hole -
		 * we already frob around that catastrophe.
		 */
		if (e820.map[i].type == E820_RESERVED) {
			if (e820.map[i].addr >= 0x100000UL &&
			    e820.map[i].addr < clip)
				clip = e820.map[i].addr;
			continue;
		}
		start = e820.map[i].addr;
		end = e820.map[i].addr + e820.map[i].size;
		if (start >= end)
			continue;
		if (end > top)
			top = end;
	}
	/*
	 * Everything below 'top' should be RAM except for the ISA hole.
	 * Because of the limited number of MCRs we want to map NV/ACPI
	 * into our MCR range for gunk in RAM.
	 *
	 * Clip might cause us to MCR insufficient RAM but that is an
	 * acceptable failure mode and should only bite obscure boxes with
	 * a VESA hole at 15MB.
	 *
	 * The second case where Clip sometimes kicks in is when the EBDA
	 * is marked as reserved. Again we fail safe with reasonable results.
	 */
	if (top > clip)
		top = clip;

	return top;
}

/*
 * Compute a set of MCRs to give maximum coverage
 */
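/*
 * Greedy placement: start from the largest power-of-two boundary below
 * the top of RAM, then for each MCR cover whichever is biggest of
 *  - the remaining space above 'top',
 *  - half of 'base' going downwards (stopping at the 1MB ISA hole), or
 *  - the 512K/128K chunks below the ISA hole,
 * until 'nr' MCRs are used or nothing useful is left.
 */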
static int __cpuinit centaur_mcr_compute(int nr, int key)
{
	u32 mem = ramtop();
	u32 root = power2(mem);
	u32 base = root;
	u32 top = root;
	u32 floor = 0;
	int ct = 0;

	while (ct < nr) {
		u32 fspace = 0;
		/*
		 * Find the largest block we will fill going upwards
		 */
		u32 high = power2(mem - top);
		/*
		 * Find the largest block we will fill going downwards
		 */
		u32 low = base / 2;

		/*
		 * Don't fill below 1MB going downwards as there
		 * is an ISA hole in the way.
		 */
		if (base <= 1024*1024)
			low = 0;
		/*
		 * See how much space we could cover by filling below
		 * the ISA hole
		 */
		if (floor == 0)
			fspace = 512*1024;
		else if (floor == 512*1024)
			fspace = 128*1024;

		/* And forget ROM space */

		/*
		 * Now install the largest coverage we get
		 */
		if (fspace > high && fspace > low) {
			centaur_mcr_insert(ct, floor, fspace, key);
			floor += fspace;
		} else if (high > low) {
			centaur_mcr_insert(ct, top, high, key);
			top += high;
		} else if (low > 0) {
			base -= low;
			centaur_mcr_insert(ct, base, low, key);
		} else
			break;
		ct++;
	}
	/*
	 * We loaded ct values. We now need to set the mask. The caller
	 * must do this bit.
	 */
	return ct;
}

static void __cpuinit centaur_create_optimal_mcr(void)
{
	int i;
	/*
	 * Allocate up to 6 mcrs to mark as much of ram as possible
	 * as write combining and weak write ordered.
	 *
	 * To experiment with: Linux never uses stack operations for
	 * mmio spaces so we could globally enable stack operation wc
	 *
	 * Load the registers with type 31 - full write combining, all
	 * writes weakly ordered.
	 */
	int used = centaur_mcr_compute(6, 31);

	/*
	 * Wipe unused MCRs
	 */
	for (i = used; i < 8; i++)
		wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

static void __cpuinit winchip2_create_optimal_mcr(void)
{
	u32 lo, hi;
	int i;
	/*
	 * Allocate up to 6 mcrs to mark as much of ram as possible
	 * as write combining, weak store ordered.
	 *
	 * Load the registers with type 25:
	 *	8  - weak write ordering
	 *	16 - weak read ordering
	 *	1  - write combining
	 */
	int used = centaur_mcr_compute(6, 25);

	/*
	 * Mark the registers we are using.
	 */
	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	for (i = 0; i < used; i++)
		lo |= 1<<(9+i);
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);

	/*
	 * Wipe unused MCRs
	 */
	for (i = used; i < 8; i++)
		wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

/*
 * Handle the MCR key on the Winchip 2.
 */
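/*
 * MSR_IDT_MCR_CTRL carries a key in bits 19-17; copying that value into
 * bits 8-6 unlocks the MCRs for writing, and clearing bits 8-6 locks
 * them again.
 */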
static void __cpuinit winchip2_unprotect_mcr(void)
{
	u32 lo, hi;
	u32 key;

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	lo &= ~0x1C0;		/* blank bits 8-6 */
	key = (lo >> 17) & 7;
	lo |= key << 6;		/* replace with unlock key */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}

static void __cpuinit winchip2_protect_mcr(void)
{
	u32 lo, hi;

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	lo &= ~0x1C0;		/* blank bits 8-6 */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
#endif /* CONFIG_X86_OOSTORE */
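
/*
 * ACE/RNG present/enabled bits live in the Centaur extended feature flags
 * (CPUID 0xC0000001 EDX); ACE_FCR and RNG_ENABLE are the matching enable
 * bits in MSR_VIA_FCR and MSR_VIA_RNG used below.
 */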
#define ACE_PRESENT	(1 << 6)
#define ACE_ENABLED	(1 << 7)
#define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */

#define RNG_PRESENT	(1 << 2)
#define RNG_ENABLED	(1 << 3)
#define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */

static void __cpuinit init_c3(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr(MSR_VIA_FCR, lo, hi);
			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr(MSR_VIA_RNG, lo, hi);
			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
		}

		/* store Centaur Extended Feature Flags as
		 * word 5 of the CPU capability bit array
		 */
		c->x86_capability[5] = cpuid_edx(0xC0000001);
	}

	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >= 6 && c->x86_model <= 9) {
		rdmsr(MSR_VIA_FCR, lo, hi);
		lo |= (1<<1 | 1<<7);
		wrmsr(MSR_VIA_FCR, lo, hi);
		set_bit(X86_FEATURE_CX8, c->x86_capability);
	}

	/* Before Nehemiah, the C3's had 3DNow! */
	if (c->x86_model >= 6 && c->x86_model < 9)
		set_bit(X86_FEATURE_3DNOW, c->x86_capability);

	get_model_name(c);
	display_cacheinfo(c);
}

static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
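	/*
	 * Bits in the WinChip Feature Control Register (MSR_IDT_FCR1).
	 * By naming convention the E-prefixed bits enable something when
	 * set and the D-prefixed bits disable something.
	 */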
	enum {
		ECX8 = 1<<1,
		EIERRINT = 1<<2,
		DPM = 1<<3,
		DMCE = 1<<4,
		DSTPCLK = 1<<5,
		ELINEAR = 1<<6,
		DSMC = 1<<7,
		DTLOCK = 1<<8,
		EDCTLB = 1<<8,
		EMMX = 1<<9,
		DPDC = 1<<11,
		EBRPRED = 1<<12,
		DIC = 1<<13,
		DDC = 1<<14,
		DNA = 1<<15,
		ERETSTK = 1<<16,
		E2MMX = 1<<19,
		EAMD3D = 1<<20,
	};
	char *name;
	u32 fcr_set = 0;
	u32 fcr_clr = 0;
	u32 lo, hi, newlo;
	u32 aa, bb, cc, dd;

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
	 * 3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_bit(0*32+31, c->x86_capability);

	switch (c->x86) {
	case 5:
		switch (c->x86_model) {
		case 4:
			name = "C6";
			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
			fcr_clr = DPDC;
			printk(KERN_NOTICE "Disabling bugged TSC.\n");
			clear_bit(X86_FEATURE_TSC, c->x86_capability);
#ifdef CONFIG_X86_OOSTORE
			centaur_create_optimal_mcr();
			/*
			 * Enable:
			 *	write combining on non-stack, non-string
			 *	write combining on string, all types
			 *	weak write ordering
			 *
			 * The C6 original lacks weak read order
			 *
			 * Note 0x120 is write only on Winchip 1
			 */
			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
			break;
		case 8:
			switch (c->x86_mask) {
			default:
				name = "2";
				break;
			case 7 ... 9:
				name = "2A";
				break;
			case 10 ... 15:
				name = "2B";
				break;
			}
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
			fcr_clr = DPDC;
#ifdef CONFIG_X86_OOSTORE
			winchip2_unprotect_mcr();
			winchip2_create_optimal_mcr();
			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
			/*
			 * Enable:
			 *	write combining on non-stack, non-string
			 *	write combining on string, all types
			 *	weak write ordering
			 */
			lo |= 31;
			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
			winchip2_protect_mcr();
#endif
			break;
		case 9:
			name = "3";
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
			fcr_clr = DPDC;
#ifdef CONFIG_X86_OOSTORE
			winchip2_unprotect_mcr();
			winchip2_create_optimal_mcr();
			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
			/*
			 * Enable:
			 *	write combining on non-stack, non-string
			 *	write combining on string, all types
			 *	weak write ordering
			 */
			lo |= 31;
			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
			winchip2_protect_mcr();
#endif
			break;
		default:
			name = "??";
		}

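		/* Apply the model-specific FCR set/clear masks chosen above */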
		rdmsr(MSR_IDT_FCR1, lo, hi);
		newlo = (lo|fcr_set) & ~fcr_clr;

		if (newlo != lo) {
			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo);
			wrmsr(MSR_IDT_FCR1, newlo, hi);
		} else {
			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
		}

		/* Emulate MTRRs using Centaur's MCR. */
		set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
		/* Report CX8 */
		set_bit(X86_FEATURE_CX8, c->x86_capability);
		/* Set 3DNow! on Winchip 2 and above. */
		if (c->x86_model >= 8)
			set_bit(X86_FEATURE_3DNOW, c->x86_capability);
		/* See if we can find out some more. */
		if (cpuid_eax(0x80000000) >= 0x80000005) {
			/* Yes, we can. */
			cpuid(0x80000005, &aa, &bb, &cc, &dd);
			/* Add L1 data and code cache sizes. */
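			/*
			 * CPUID 0x80000005: ECX[31:24] and EDX[31:24] report
			 * the L1 data and instruction cache sizes in KB.
			 */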
			c->x86_cache_size = (cc>>24) + (dd>>24);
		}
		sprintf(c->x86_model_id, "WinChip %s", name);
		break;
	case 6:
		init_c3(c);
		break;
	}
}

static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* VIA C3 CPUs (670-68F) need further shifting. */
	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
		size >>= 8;

	/*
	 * VIA also screwed up Nehemiah stepping 1, and made it return
	 * '65KB' instead of '64KB'
	 *  - Note, it seems this may only be in engineering samples.
	 */
	if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_mask == 1) && (size == 65))
		size -= 1;

	return size;
}

static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
	.c_vendor	= "Centaur",
	.c_ident	= { "CentaurHauls" },
	.c_init		= init_centaur,
	.c_size_cache	= centaur_size_cache,
};

int __init centaur_init_cpu(void)
{
	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
	return 0;
}