/*
 * amd.c — MTRR (Memory Type Range Register) support for the AMD K6
 * family, which exposes its two variable ranges through the single
 * UWCCR MSR (low dword = region 0, high dword = region 1).
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>	/* u32 */

#include <asm/msr.h>
#include <asm/mtrr.h>

#include "mtrr.h"
  6. static void
  7. amd_get_mtrr(unsigned int reg, unsigned long *base,
  8. unsigned long *size, mtrr_type * type)
  9. {
  10. unsigned long low, high;
  11. rdmsr(MSR_K6_UWCCR, low, high);
  12. /* Upper dword is region 1, lower is region 0 */
  13. if (reg == 1)
  14. low = high;
  15. /* The base masks off on the right alignment */
  16. *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
  17. *type = 0;
  18. if (low & 1)
  19. *type = MTRR_TYPE_UNCACHABLE;
  20. if (low & 2)
  21. *type = MTRR_TYPE_WRCOMB;
  22. if (!(low & 3)) {
  23. *size = 0;
  24. return;
  25. }
  26. /*
  27. * This needs a little explaining. The size is stored as an
  28. * inverted mask of bits of 128K granularity 15 bits long offset
  29. * 2 bits
  30. *
  31. * So to get a size we do invert the mask and add 1 to the lowest
  32. * mask bit (4 as its 2 bits in). This gives us a size we then shift
  33. * to turn into 128K blocks
  34. *
  35. * eg 111 1111 1111 1100 is 512K
  36. *
  37. * invert 000 0000 0000 0011
  38. * +1 000 0000 0000 0100
  39. * *128K ...
  40. */
  41. low = (~low) & 0x1FFFC;
  42. *size = (low + 4) << (15 - PAGE_SHIFT);
  43. return;
  44. }
  45. static void amd_set_mtrr(unsigned int reg, unsigned long base,
  46. unsigned long size, mtrr_type type)
  47. /* [SUMMARY] Set variable MTRR register on the local CPU.
  48. <reg> The register to set.
  49. <base> The base address of the region.
  50. <size> The size of the region. If this is 0 the region is disabled.
  51. <type> The type of the region.
  52. [RETURNS] Nothing.
  53. */
  54. {
  55. u32 regs[2];
  56. /*
  57. * Low is MTRR0 , High MTRR 1
  58. */
  59. rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
  60. /*
  61. * Blank to disable
  62. */
  63. if (size == 0)
  64. regs[reg] = 0;
  65. else
  66. /* Set the register to the base, the type (off by one) and an
  67. inverted bitmask of the size The size is the only odd
  68. bit. We are fed say 512K We invert this and we get 111 1111
  69. 1111 1011 but if you subtract one and invert you get the
  70. desired 111 1111 1111 1100 mask
  71. But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
  72. regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
  73. | (base << PAGE_SHIFT) | (type + 1);
  74. /*
  75. * The writeback rule is quite specific. See the manual. Its
  76. * disable local interrupts, write back the cache, set the mtrr
  77. */
  78. wbinvd();
  79. wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
  80. }
  81. static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
  82. {
  83. /* Apply the K6 block alignment and size rules
  84. In order
  85. o Uncached or gathering only
  86. o 128K or bigger block
  87. o Power of 2 block
  88. o base suitably aligned to the power
  89. */
  90. if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
  91. || (size & ~(size - 1)) - size || (base & (size - 1)))
  92. return -EINVAL;
  93. return 0;
  94. }
  95. static struct mtrr_ops amd_mtrr_ops = {
  96. .vendor = X86_VENDOR_AMD,
  97. .set = amd_set_mtrr,
  98. .get = amd_get_mtrr,
  99. .get_free_region = generic_get_free_region,
  100. .validate_add_page = amd_validate_add_page,
  101. .have_wrcomb = positive_have_wrcomb,
  102. };
  103. int __init amd_init_mtrr(void)
  104. {
  105. set_mtrr_ops(&amd_mtrr_ops);
  106. return 0;
  107. }
  108. //arch_initcall(amd_mtrr_init);