cyrix.c

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
#include "mtrr.h"
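
/*
 * Set when ARR3 is locked down for SMM use (SMI_LOCK in CCR3); in that
 * case the range behind ARR3 must not be handed out or reprogrammed.
 */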
int arr3_protected;
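
/*
 * Read back one ARR: recover the base (in pages), the size (in pages) and
 * the memory type from the three ARRx bytes and the matching RCR. MAPEN
 * (CCR3 bit 4) has to be set while the ARR/RCR registers are accessed.
 */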
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type *type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if they were enabled previously */
	local_irq_restore(flags);

	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}

static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in pages) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;
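
	/*
	 * Note the deliberate fall-through below: a caller asking to replace
	 * a specific register gets that index back, except that ARR7 is
	 * refused for regions smaller than 256K (0x40 pages) and ARR3 is
	 * refused while it is protected for SMM use.
	 */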
	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		if (arr3_protected)
			break;
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}

	/* If we are to set up a region >32M then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/* ARR0-ARR6 aren't free, try ARR7 but its size must be at least 256K */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}
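
/*
 * prepare_set()/post_set() bracket every ARR/CCR update: caches are
 * disabled and flushed, PGE is temporarily cleared, and MAPEN (CCR3 bit 4)
 * is turned on so that the Cyrix configuration registers become visible.
 * cr4 and ccr3 hold the values that post_set() restores.
 */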
static u32 cr4 = 0;
static u32 ccr3;

static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
	   a side-effect */
	cr0 = read_cr0() | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
}

static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable caches by clearing CR0.CD (bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
}
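
/*
 * Program one ARR: split the page-aligned base into the three ARRx bytes,
 * encode the size as a power-of-two exponent in the low nibble of the last
 * byte, and set the matching RCR to the requested memory type.
 */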
static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	for (arr_size = 0; size; arr_size++, size >>= 1) ;

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}
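
/*
 * Shadow copy of the ARR and CCR state. On SMP this is filled in by
 * cyrix_arr_init() (currently compiled out) and replayed on each CPU by
 * cyrix_set_all().
 */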
typedef struct {
	unsigned long base;
	unsigned long size;
	mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] = {
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
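
/*
 * Rewrite every CCR and ARR from the shadow state above; the generic MTRR
 * core uses this to bring a CPU's ranges back in line with the saved setup.
 */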
static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/* the CCRs are not contiguous: CCR0-CCR3, then CCR4-CCR6 */
	for (i = 0; i < 4; i++)
		setCx86(CX86_CCR0 + i, ccr_state[i]);
	for (; i < 7; i++)
		setCx86(CX86_CCR4 + i - 4, ccr_state[i]);

	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}
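
/*
 * The ARR3/SMM initialisation below is compiled out, and the matching
 * .init hook in cyrix_mtrr_ops is commented out as well, so nothing in
 * this file currently populates the shadow state from the hardware.
 */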
#if 0
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
 * with the SMM (System Management Mode) mode. So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}

	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}

#ifdef CONFIG_SMP
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
	if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
	if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
	if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif
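
/*
 * Vendor hooks handed to the generic MTRR core by cyrix_init_mtrr(); the
 * .init hook is commented out together with cyrix_arr_init() above.
 */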
static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all           = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}

//arch_initcall(cyrix_init_mtrr);