/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

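/* dprintk() output is compiled in but only emitted when booting with "debugpat". */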
#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

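/* PAT(x, y) places memory type PAT_y into byte x of the 64-bit PAT MSR value. */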
#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

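/*
 * pat_init() runs on each CPU and programs MSR_IA32_CR_PAT with the PTE
 * encoding described below. The boot CPU's original MSR value is saved
 * in boot_pat_state, which also serves as the "PAT already set up" flag
 * for the secondary-CPU paranoia check.
 */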
void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but
                 * switched to PAT on the boot CPU. We have no way to
                 * undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same. */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

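/* Human-readable name of a _PAGE_CACHE_* attribute, for diagnostics. */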
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so we track them here to
 * avoid that.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting of overlapping
 * areas). All the aliases have the same cache attributes, of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the effective memory type as PAT understands it.
 * (PAT and MTRR types do not use the same numeric values.)
 * The intersection follows the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for an MTRR hint to get the effective type in case the
         * PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}

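/*
 * Check whether the new region can coexist with 'entry' and with any
 * later list entries it also overlaps. If 'type' is non-NULL the new
 * request inherits the existing entry's type (reported back through
 * 'type'); otherwise a type mismatch is a hard conflict. Returns 0 on
 * success, -EBUSY on a conflicting overlap.
 */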
static int chk_conflict(struct memtype *new, struct memtype *entry,
                        unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type has the special value '-1' when the requester wants to inherit
 * the memory type from the MTRR (if WB) or from an existing PAT entry,
 * defaulting to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type through new_type on success. On any error it returns
 * a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end = end;
        new->type = actual_type;

        if (new_type)
                *new_type = actual_type;

        spin_lock(&memtype_lock);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = &entry->nd;
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
                return err;
        }

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");
        return err;
}

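/*
 * Release a reservation made earlier by reserve_memtype(). The range
 * must match an existing entry exactly; returns -EINVAL (and logs the
 * caller) if no such entry is found.
 */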
int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
        return err;
}

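/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): reserve a physical range as WC before mapping it, accept
 * whatever type the reservation resolved to, and pair every successful
 * reserve with a free on teardown:
 *
 *      unsigned long type;
 *
 *      if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &type))
 *              return -EBUSY;
 *      ...                             (map the range using 'type')
 *      free_memtype(base, base + size);
 */
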
/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
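/*
 * Walk the range one page at a time and refuse the whole mapping if
 * any page in it is not allowed to be accessed via /dev/mem.
 */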
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                               current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */

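/*
 * Decide whether a /dev/mem mapping of the given pfn range may proceed
 * and with which cache attribute. Returns 1 and rewrites the cache bits
 * of *vma_prot on success, 0 if the mapping must be refused.
 */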
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = _PAGE_CACHE_UC_MINUS;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take UC mapping. Fail if we cannot.
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != _PAGE_CACHE_UC_MINUS) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                       "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

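/*
 * Track an active /dev/mem mapping: reserve the range with the caller's
 * requested memtype and log a warning if the reservation resolved to a
 * different type than the mapping expected.
 */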
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                       "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                       current->comm, current->pid,
                       cattr_name(want_flags),
                       addr, (unsigned long long)(addr + size),
                       cattr_name(flags));
        }
}

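/* Drop the memtype reservation taken by map_devmem() for this range. */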
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}