cyrix.c
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include "mtrr.h"

int arr3_protected;
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
              unsigned long *size, mtrr_type *type)
{
        unsigned long flags;
        unsigned char arr, ccr3, rcr, shift;

        arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

        /* Save flags and disable interrupts */
        local_irq_save(flags);

        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
        ((unsigned char *) base)[3] = getCx86(arr);
        ((unsigned char *) base)[2] = getCx86(arr + 1);
        ((unsigned char *) base)[1] = getCx86(arr + 2);
        rcr = getCx86(CX86_RCR_BASE + reg);
        setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
        /* Restore interrupts if they were enabled previously */
        local_irq_restore(flags);
        shift = ((unsigned char *) base)[1] & 0x0f;
        *base >>= PAGE_SHIFT;

        /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
         * Note: shift==0xf means 4G, this is unsupported.
         */
        if (shift)
                *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
        else
                *size = 0;
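        /*
         * Worked example (illustrative only): with shift == 4, an ARR0-ARR6
         * region decodes to 0x1UL << 3 = 8 pages (32K), while ARR7 decodes
         * to 0x40UL << 3 = 512 pages (2M).  Both *base and *size are thus
         * reported in 4K pages, consistent with cyrix_get_free_region()
         * below, which compares sizes against page counts
         * (0x2000 pages == 32M).
         */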
        /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
        if (reg < 7) {
                switch (rcr) {
                case 1:
                        *type = MTRR_TYPE_UNCACHABLE;
                        break;
                case 8:
                        *type = MTRR_TYPE_WRBACK;
                        break;
                case 9:
                        *type = MTRR_TYPE_WRCOMB;
                        break;
                case 24:
                default:
                        *type = MTRR_TYPE_WRTHROUGH;
                        break;
                }
        } else {
                switch (rcr) {
                case 0:
                        *type = MTRR_TYPE_UNCACHABLE;
                        break;
                case 8:
                        *type = MTRR_TYPE_WRCOMB;
                        break;
                case 9:
                        *type = MTRR_TYPE_WRBACK;
                        break;
                case 25:
                default:
                        *type = MTRR_TYPE_WRTHROUGH;
                        break;
                }
        }
}
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free ARR.
   <base> The starting (base) address of the region.
   <size> The size of the region, in 4K pages.
   <replace_reg> ARR index to reuse if valid; anything outside 0-7 forces a search.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
        int i;
        mtrr_type ltype;
        unsigned long lbase, lsize;

        switch (replace_reg) {
        case 7:
                if (size < 0x40)
                        break;
                /* fall through */
        case 6:
        case 5:
        case 4:
                return replace_reg;
        case 3:
                if (arr3_protected)
                        break;
                /* fall through */
        case 2:
        case 1:
        case 0:
                return replace_reg;
        }
        /* If we are to set up a region >32M then look at ARR7 immediately */
        if (size > 0x2000) {
                cyrix_get_arr(7, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return 7;
        /* Else try ARR0-ARR6 first */
        } else {
                for (i = 0; i < 7; i++) {
                        cyrix_get_arr(i, &lbase, &lsize, &ltype);
                        if ((i == 3) && arr3_protected)
                                continue;
                        if (lsize == 0)
                                return i;
                }
                /* None of ARR0-ARR6 is free; try ARR7, but the region
                   must be at least 256K for ARR7 to hold it */
                cyrix_get_arr(i, &lbase, &lsize, &ltype);
                if ((lsize == 0) && (size >= 0x40))
                        return i;
        }
        return -ENOSPC;
}
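/*
 * For illustration only: a 64M request arrives here as size == 0x4000 pages,
 * takes the size > 0x2000 branch and is offered ARR7 if it is unused; a 1M
 * request (0x100 pages) scans ARR0-ARR6 first, skipping ARR3 when it is
 * reserved for SMM, and only falls back to ARR7 when the region is at least
 * 0x40 pages (256K).
 */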
static u32 cr4 = 0;
static u32 ccr3;

static void prepare_set(void)
{
        u32 cr0;

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if ( cpu_has_pge ) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~(1UL << 7));
        }

        /* Disable and flush caches. Note that wbinvd flushes the TLBs as
           a side-effect */
        cr0 = read_cr0() | 0x40000000;
        wbinvd();
        write_cr0(cr0);
        wbinvd();

        /* Cyrix ARRs - everything else was excluded at the top */
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
}
static void post_set(void)
{
        /* Flush caches and TLBs */
        wbinvd();

        /* Cyrix ARRs - everything else was excluded at the top */
        setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if ( cpu_has_pge )
                write_cr4(cr4);
}
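/*
 * Both cyrix_set_arr() and cyrix_set_all() below bracket their register
 * writes the same way, sketched here for reference:
 *
 *	prepare_set();                    caches disabled, MAPEN enabled
 *	setCx86(<ARR/RCR/CCR register>);  program the Cyrix config registers
 *	post_set();                       MAPEN disabled, caches re-enabled
 *
 * so the configuration registers are never touched with caching enabled.
 */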
static void cyrix_set_arr(unsigned int reg, unsigned long base,
                          unsigned long size, mtrr_type type)
{
        unsigned char arr, arr_type, arr_size;

        arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

        /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
        if (reg >= 7)
                size >>= 6;

        size &= 0x7fff;		/* make sure arr_size <= 14 */
        for (arr_size = 0; size; arr_size++, size >>= 1)
                ;
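        /*
         * Worked example (illustrative only): a 1M request on ARR0-ARR6 is
         * size == 0x100 pages, so the loop above shifts nine times and
         * leaves arr_size == 9; cyrix_get_arr() decodes that back as
         * 0x1UL << 8 == 0x100 pages.  For ARR7 the size was pre-divided by
         * 64, so 2G (0x80000 pages) becomes 0x2000 and arr_size == 14.
         */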
        if (reg < 7) {
                switch (type) {
                case MTRR_TYPE_UNCACHABLE:
                        arr_type = 1;
                        break;
                case MTRR_TYPE_WRCOMB:
                        arr_type = 9;
                        break;
                case MTRR_TYPE_WRTHROUGH:
                        arr_type = 24;
                        break;
                default:
                        arr_type = 8;
                        break;
                }
        } else {
                switch (type) {
                case MTRR_TYPE_UNCACHABLE:
                        arr_type = 0;
                        break;
                case MTRR_TYPE_WRCOMB:
                        arr_type = 8;
                        break;
                case MTRR_TYPE_WRTHROUGH:
                        arr_type = 25;
                        break;
                default:
                        arr_type = 9;
                        break;
                }
        }

        prepare_set();

        base <<= PAGE_SHIFT;
        setCx86(arr, ((unsigned char *) &base)[3]);
        setCx86(arr + 1, ((unsigned char *) &base)[2]);
        setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
        setCx86(CX86_RCR_BASE + reg, arr_type);

        post_set();
}
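#if 0
/*
 * Usage sketch only, with made-up values: map a hypothetical 1M linear
 * frame buffer at 0xd0000000 as write-combining through ARR0.  Base and
 * size are passed in 4K pages, matching cyrix_get_arr() above.  In the
 * kernel proper these ops are reached through the generic MTRR interface
 * (mtrr_add() and friends), not called directly.
 */
static void cyrix_arr_example(void)
{
        cyrix_set_arr(0, 0xd0000000UL >> PAGE_SHIFT, 0x100, MTRR_TYPE_WRCOMB);
}
#endif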
typedef struct {
        unsigned long base;
        unsigned long size;
        mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] __devinitdata = {
        {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
        {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] __devinitdata = { 0, 0, 0, 0, 0, 0, 0 };
static void cyrix_set_all(void)
{
        int i;

        prepare_set();

        /* the CCRs are not contiguous: CCR0-CCR3, then CCR4-CCR6 */
        for (i = 0; i < 4; i++)
                setCx86(CX86_CCR0 + i, ccr_state[i]);
        for (; i < 7; i++)
                setCx86(CX86_CCR4 + i - 4, ccr_state[i]);

        for (i = 0; i < 8; i++)
                cyrix_set_arr(i, arr_state[i].base,
                              arr_state[i].size, arr_state[i].type);

        post_set();
}
#if 0
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
        struct set_mtrr_context ctxt;
        unsigned char ccr[7];
        int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
        int i;
#endif

        /* flush cache and enable MAPEN */
        set_mtrr_prepare_save(&ctxt);
        set_mtrr_cache_disable(&ctxt);

        /* Save all CCRs locally */
        ccr[0] = getCx86(CX86_CCR0);
        ccr[1] = getCx86(CX86_CCR1);
        ccr[2] = getCx86(CX86_CCR2);
        ccr[3] = ctxt.ccr3;
        ccr[4] = getCx86(CX86_CCR4);
        ccr[5] = getCx86(CX86_CCR5);
        ccr[6] = getCx86(CX86_CCR6);
        if (ccr[3] & 1) {
                ccrc[3] = 1;
                arr3_protected = 1;
        } else {
                /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
                 * access to SMM memory through ARR3 (bit 7).
                 */
                if (ccr[1] & 0x80) {
                        ccr[1] &= 0x7f;
                        ccrc[1] |= 0x80;
                }
                if (ccr[1] & 0x04) {
                        ccr[1] &= 0xfb;
                        ccrc[1] |= 0x04;
                }
                if (ccr[1] & 0x02) {
                        ccr[1] &= 0xfd;
                        ccrc[1] |= 0x02;
                }
                arr3_protected = 0;
                if (ccr[6] & 0x02) {
                        ccr[6] &= 0xfd;
                        ccrc[6] = 1;	/* Disable write protection of ARR3 */
                        setCx86(CX86_CCR6, ccr[6]);
                }
                /* Disable ARR3. This is safe now that we disabled SMM. */
                /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
        }
        /* If we changed CCR1 in memory, change it in the processor, too. */
        if (ccrc[1])
                setCx86(CX86_CCR1, ccr[1]);

        /* Enable ARR usage by the processor */
        if (!(ccr[5] & 0x20)) {
                ccr[5] |= 0x20;
                ccrc[5] = 1;
                setCx86(CX86_CCR5, ccr[5]);
        }
#ifdef CONFIG_SMP
        for (i = 0; i < 7; i++)
                ccr_state[i] = ccr[i];
        for (i = 0; i < 8; i++)
                cyrix_get_arr(i,
                              &arr_state[i].base, &arr_state[i].size,
                              &arr_state[i].type);
#endif

        set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

        if (ccrc[5])
                printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
        if (ccrc[3])
                printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
        if (ccrc[1] & 0x80) printk("mtrr: SMM memory access through ARR3 disabled\n");
        if (ccrc[1] & 0x04) printk("mtrr: SMM memory access disabled\n");
        if (ccrc[1] & 0x02) printk("mtrr: SMM mode disabled\n");
*/
        if (ccrc[6])
                printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif
static struct mtrr_ops cyrix_mtrr_ops = {
        .vendor            = X86_VENDOR_CYRIX,
//      .init              = cyrix_arr_init,
        .set_all           = cyrix_set_all,
        .set               = cyrix_set_arr,
        .get               = cyrix_get_arr,
        .get_free_region   = cyrix_get_free_region,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = positive_have_wrcomb,
};
int __init cyrix_init_mtrr(void)
{
        set_mtrr_ops(&cyrix_mtrr_ops);
        return 0;
}
//arch_initcall(cyrix_init_mtrr);
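/*
 * Note (assumption, not stated in this file): cyrix_init_mtrr() only
 * registers the ops table via set_mtrr_ops(); the MTRR core is expected to
 * call it explicitly during its own initialisation, which is presumably why
 * the arch_initcall() registration above is left commented out.
 */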