amd.c 3.2 KB

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
             unsigned int *size, mtrr_type *type)
{
        unsigned long low, high;

        rdmsr(MSR_K6_UWCCR, low, high);
        /* Upper dword is region 1, lower is region 0 */
        if (reg == 1)
                low = high;
        /* The base field is bits 31:17, already 128K aligned */
        *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
        *type = 0;
        if (low & 1)
                *type = MTRR_TYPE_UNCACHABLE;
        if (low & 2)
                *type = MTRR_TYPE_WRCOMB;
        if (!(low & 3)) {
                *size = 0;
                return;
        }
        /*
         * This needs a little explaining. The size is stored as an
         * inverted mask of bits of 128K granularity, 15 bits long,
         * offset 2 bits.
         *
         * So to get a size we invert the mask and add 1 to the lowest
         * mask bit (4, as it is 2 bits in). This gives us a size we
         * then shift to turn into 128K blocks.
         *
         * eg      111 1111 1111 1100      is 512K
         *
         * invert  000 0000 0000 0011
         * +1      000 0000 0000 0100
         * *128K   ...
         */
        low = (~low) & 0x1FFFC;
        *size = (low + 4) << (15 - PAGE_SHIFT);
}
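/*
 * Editor's note: a worked decode of the 512K case from the comment
 * above, assuming 4K pages (PAGE_SHIFT == 12), so the shift in
 * amd_get_mtrr is 15 - 12 = 3. For a 512K region the mask field at
 * bits 16:2 holds 0x1FFF0 (the 15-bit value 111 1111 1111 1100):
 *
 *   (~0x1FFF0) & 0x1FFFC  =  0x0000C
 *   0x0000C + 4           =  0x00010  (16)
 *   16 << 3               =  128 pages  =  512K
 */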
static void amd_set_mtrr(unsigned int reg, unsigned long base,
                         unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
        u32 regs[2];

        /*
         * Low is MTRR0, high is MTRR1.
         */
        rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);

        /*
         * Blank to disable.
         */
        if (size == 0) {
                regs[reg] = 0;
        } else {
                /*
                 * Set the register to the base, the type (off by one)
                 * and an inverted bitmask of the size. The size is the
                 * only odd bit. We are fed, say, 512K. We invert this
                 * and we get 111 1111 1111 1011, but if you subtract
                 * one and invert you get the desired
                 * 111 1111 1111 1100 mask.
                 *
                 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
                 */
                regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
                    | (base << PAGE_SHIFT) | (type + 1);
        }

        /*
         * The writeback rule is quite specific. See the manual. It is:
         * disable local interrupts, write back the cache, set the MTRR.
         */
        wbinvd();
        wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
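/*
 * Editor's note: the same 512K example, encode side, again assuming
 * PAGE_SHIFT == 12. Here size arrives as 128 pages (512K / 4K):
 *
 *   -128                 =  ...FFFFFF80  (two's complement)
 *   -128 >> 3            =  ...FFFFFFF0
 *   ...  & 0x0001FFFC    =  0x1FFF0
 *
 * which is exactly the mask the decode example above recovers as 512K.
 */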
static int amd_validate_add_page(unsigned long base, unsigned long size,
                                 unsigned int type)
{
        /*
         * Apply the K6 block alignment and size rules.
         * In order:
         *   o Uncached or gathering only
         *   o 128K or bigger block
         *   o Power of 2 block
         *   o base suitably aligned to the power
         */
        if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
            || (size & ~(size - 1)) - size || (base & (size - 1)))
                return -EINVAL;
        return 0;
}
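/*
 * Editor's note: the power-of-2 test works because (size & ~(size - 1))
 * isolates the lowest set bit, which for a power of two is the whole
 * value, so the subtraction yields 0. With PAGE_SHIFT == 12:
 *
 *   size = 0x80 (512K):  0x80 & ~0x7F = 0x80,  0x80 - 0x80 == 0  -> OK
 *   size = 0xA0 (640K):  0xA0 & ~0x9F = 0x20,  0x20 - 0xA0 != 0  -> -EINVAL
 */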
static struct mtrr_ops amd_mtrr_ops = {
        .vendor            = X86_VENDOR_AMD,
        .set               = amd_set_mtrr,
        .get               = amd_get_mtrr,
        .get_free_region   = generic_get_free_region,
        .validate_add_page = amd_validate_add_page,
        .have_wrcomb       = positive_have_wrcomb,
};

int __init amd_init_mtrr(void)
{
        set_mtrr_ops(&amd_mtrr_ops);
        return 0;
}

//arch_initcall(amd_init_mtrr);
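
The size arithmetic above is easy to check outside the kernel. The
following is a minimal user-space sketch (not part of amd.c) that
round-trips the UWCCR mask encoding; encode(), decode() and the
PAGE_SHIFT value of 12 are illustrative assumptions, not kernel
interfaces.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4K pages, as on i386 */

/* Hypothetical helper: size in pages -> inverted mask at bits 16:2 */
static unsigned int encode(unsigned long size_pages)
{
        return (-size_pages >> (15 - PAGE_SHIFT)) & 0x0001FFFC;
}

/* Hypothetical helper: inverted mask -> size in pages */
static unsigned long decode(unsigned int mask)
{
        return ((~mask & 0x1FFFC) + 4) << (15 - PAGE_SHIFT);
}

int main(void)
{
        unsigned long kb;

        for (kb = 128; kb <= 2048; kb <<= 1) {
                unsigned long pages = (kb << 10) >> PAGE_SHIFT;
                unsigned int mask = encode(pages);

                printf("%5luK -> mask 0x%05X -> %5luK\n", kb, mask,
                       (decode(mask) << PAGE_SHIFT) >> 10);
        }
        return 0;
}

For the 512K row this prints mask 0x1FFF0 and recovers 512K, matching
the worked examples in the comments above.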