cyrix.c

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include "mtrr.h"

int arr3_protected;

static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type * type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
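	/*
	 * Each ARR is three 8-bit registers: base address bits 31-24,
	 * bits 23-16, and a third byte whose high nibble holds base
	 * bits 15-12 and whose low nibble encodes the block size.
	 */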
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if they were enabled previously */
	local_irq_restore(flags);

	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
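	/* e.g. shift==1 gives 1 page (4K) on ARR0-ARR6 and 0x40 pages (256K) on ARR7 */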
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}

static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region (in 4K pages).
    <size> The size of the region (in 4K pages).
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;
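	/*
	 * A specific register was requested: honour it unless ARR7 is too
	 * small for the region or ARR3 is protected.  The case labels fall
	 * through deliberately.
	 */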
	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		if (arr3_protected)
			break;
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}
	/* If we are to set up a region >32M (0x2000 pages) then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
	/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/* None of ARR0-ARR6 is free, try ARR7 but its size must be at least 256K */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}

static u32 cr4 = 0;
static u32 ccr3;

static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
	 * a side-effect.
	 */
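	/* CR0 bit 30 is CD (cache disable) */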
	cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	ccr3 = getCx86(CX86_CCR3);

	/* Enable MAPEN so the ARRs can be written */
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}

static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
}

static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
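	/* encode size (a power of two) as the 4-bit block-size field: log2(size) + 1 */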
	for (arr_size = 0; size; arr_size++, size >>= 1)
		;

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}

typedef struct {
	unsigned long base;
	unsigned long size;
	mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] = {
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };

static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/* the CCRs are not contiguous */
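	/* CCR0-CCR3 live at indices 0xc0-0xc3, CCR4-CCR6 at 0xe8-0xea */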
	for (i = 0; i < 4; i++)
		setCx86(CX86_CCR0 + i, ccr_state[i]);
	for (; i < 7; i++)
		setCx86(CX86_CCR4 + i - 4, ccr_state[i]);

	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}

#if 0
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
	if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
	if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
	if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif

static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all           = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}

//arch_initcall(cyrix_init_mtrr);