/*
 * amd.c — MTRR (memory type range register) support for the AMD K6
 * family, which exposes two write-combining/uncacheable regions via
 * the UWCCR MSR.
 */
  1. #include <linux/init.h>
  2. #include <linux/mm.h>
  3. #include <asm/mtrr.h>
  4. #include <asm/msr.h>
  5. #include "mtrr.h"
  6. static void
  7. amd_get_mtrr(unsigned int reg, unsigned long *base,
  8. unsigned long *size, mtrr_type *type)
  9. {
  10. unsigned long low, high;
  11. rdmsr(MSR_K6_UWCCR, low, high);
  12. /* Upper dword is region 1, lower is region 0 */
  13. if (reg == 1)
  14. low = high;
  15. /* The base masks off on the right alignment */
  16. *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
  17. *type = 0;
  18. if (low & 1)
  19. *type = MTRR_TYPE_UNCACHABLE;
  20. if (low & 2)
  21. *type = MTRR_TYPE_WRCOMB;
  22. if (!(low & 3)) {
  23. *size = 0;
  24. return;
  25. }
  26. /*
  27. * This needs a little explaining. The size is stored as an
  28. * inverted mask of bits of 128K granularity 15 bits long offset
  29. * 2 bits.
  30. *
  31. * So to get a size we do invert the mask and add 1 to the lowest
  32. * mask bit (4 as its 2 bits in). This gives us a size we then shift
  33. * to turn into 128K blocks.
  34. *
  35. * eg 111 1111 1111 1100 is 512K
  36. *
  37. * invert 000 0000 0000 0011
  38. * +1 000 0000 0000 0100
  39. * *128K ...
  40. */
  41. low = (~low) & 0x1FFFC;
  42. *size = (low + 4) << (15 - PAGE_SHIFT);
  43. }
/**
 * amd_set_mtrr - Set variable MTRR register on the local CPU.
 *
 * @reg The register to set (0 or 1; selects the low or high dword of UWCCR).
 * @base The base address of the region, in pages.
 * @size The size of the region, in pages. If this is 0 the region is disabled.
 * @type The type of the region.
 *
 * Returns nothing.
 *
 * NOTE(review): the write-back rule below also requires local interrupts
 * to be disabled; this function does not disable them itself, so that is
 * presumably the caller's responsibility — confirm against the MTRR core.
 */
static void
amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	u32 regs[2];

	/*
	 * Low dword is MTRR region 0, high dword is region 1.
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);

	/*
	 * A zero size means "disable": blank the whole dword, clearing
	 * both enable bits along with base and mask.
	 */
	if (size == 0) {
		regs[reg] = 0;
	} else {
		/*
		 * Set the register to the base, the type (off by one) and an
		 * inverted bitmask of the size. The size is the only odd
		 * bit. We are fed say 512K. We invert this and we get 111 1111
		 * 1111 1011 but if you subtract one and invert you get the
		 * desired 111 1111 1111 1100 mask.
		 *
		 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
		 *
		 * (type + 1) maps MTRR_TYPE_UNCACHABLE -> bit 0 and
		 * MTRR_TYPE_WRCOMB -> bit 1, the two enable bits.
		 */
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
		    | (base << PAGE_SHIFT) | (type + 1);
	}

	/*
	 * The writeback rule is quite specific. See the manual. Its
	 * disable local interrupts, write back the cache, set the mtrr.
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
  87. static int
  88. amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
  89. {
  90. /*
  91. * Apply the K6 block alignment and size rules
  92. * In order
  93. * o Uncached or gathering only
  94. * o 128K or bigger block
  95. * o Power of 2 block
  96. * o base suitably aligned to the power
  97. */
  98. if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
  99. || (size & ~(size - 1)) - size || (base & (size - 1)))
  100. return -EINVAL;
  101. return 0;
  102. }
  103. static const struct mtrr_ops amd_mtrr_ops = {
  104. .vendor = X86_VENDOR_AMD,
  105. .set = amd_set_mtrr,
  106. .get = amd_get_mtrr,
  107. .get_free_region = generic_get_free_region,
  108. .validate_add_page = amd_validate_add_page,
  109. .have_wrcomb = positive_have_wrcomb,
  110. };
/*
 * amd_init_mtrr - Register the AMD ops table with the common MTRR core.
 *
 * Runs once at init. Always returns 0.
 */
int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}