pat.c

/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
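
/*
 * dprintk() output is off by default; booting with the "debugpat"
 * kernel command-line option (handled by pat_debug_setup() above)
 * turns it on.
 */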

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};
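
/*
 * PAT(x, y) places the PAT_y encoding into byte x of the 64-bit
 * IA32_PAT MSR image (one byte per PAT entry). For example,
 * PAT(1, WC) evaluates to (u64)1 << 8, i.e. 0x100.
 */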
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but
                 * switched to PAT on the boot CPU. We have no way to
                 * undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
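
        /*
         * With the PAT_* encodings above this works out to the MSR value
         * 0x0007010600070106: bytes 0-3 (and their copies in bytes 4-7)
         * are WB (06), WC (01), UC- (07) and UC (00).
         */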

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
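
/* Return a human-readable name for a _PAGE_CACHE_* attribute, for logging. */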
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes, of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
struct memtype {
        u64 start;
        u64 end;
        unsigned long type;
        struct list_head nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */

/*
 * Intersect the requested PAT memory type with the MTRR type for the
 * range and return the result as a PAT encoding. (PAT and MTRR use
 * different numeric values for the same type.) The intersection follows
 * the "Effective Memory Type" tables in the IA-32 SDM, vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for the MTRR hint to get the effective type in case where
         * the PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}
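
/*
 * Check the new entry against each existing entry it overlaps, starting
 * at 'entry'. If 'type' is non-NULL, the new entry inherits the type of
 * the first overlapping entry instead of failing; any further mismatch
 * is still a conflict. Returns 0 or -EBUSY. Caller holds memtype_lock.
 */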
static int chk_conflict(struct memtype *new, struct memtype *entry,
                        unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type has a special case value of '-1' when the requester wants to
 * inherit the memory type from the MTRR (if WB) or an existing PAT entry,
 * defaulting to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in new_type on success, and a negative value on any
 * error.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end = end;
        new->type = actual_type;

        if (new_type)
                *new_type = actual_type;

        spin_lock(&memtype_lock);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = &entry->nd;
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
                return err;
        }

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}
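
/*
 * Typical call sequence (a minimal sketch; "phys" and "len" are
 * illustrative names, not symbols defined here):
 *
 *      unsigned long actual;
 *
 *      if (reserve_memtype(phys, phys + len, _PAGE_CACHE_WC, &actual) < 0)
 *              return ...;  (range claimed with another type, or -ENOMEM)
 *
 *      ... establish the mapping using 'actual' as the cache attribute ...
 *
 *      free_memtype(phys, phys + len);
 */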

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
        return err;
}

/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use the UC_MINUS memtype (for backward compatibility with
 *   existing X drivers).
 */
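/*
 * phys_mem_access_prot() itself leaves the protections untouched; the
 * policy above is applied in phys_mem_access_prot_allowed() below.
 */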
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                               current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
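
/*
 * Called when user space mmap()s /dev/mem. Returns 0 to deny the mapping,
 * or 1 with *vma_prot updated to carry the cache attribute reserved for
 * the range.
 */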
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = _PAGE_CACHE_UC_MINUS;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take a UC mapping. Fail if we cannot.
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != _PAGE_CACHE_UC_MINUS) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                       "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}
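
/*
 * Track a /dev/mem mapping of [pfn, pfn + size). A type mismatch is only
 * logged: if the granted type differs from the requested one, the mapping
 * still proceeds with the caller's vma_prot.
 */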
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags; /* stays valid if the reserve fails */

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                       "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                       current->comm, current->pid,
                       cattr_name(want_flags),
                       addr, (unsigned long long)(addr + size),
                       cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

#if defined(CONFIG_DEBUG_FS)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);

        kfree(print_entry);
        return NULL;
}
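
/*
 * The seq_file callbacks below work on copies made by memtype_get_idx(),
 * so memtype_lock is never held while printing; each copy is freed in
 * memtype_seq_show() once it has been formatted.
 */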
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next = memtype_seq_next,
        .stop = memtype_seq_stop,
        .show = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open = memtype_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
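
/*
 * Expose the list at <debugfs>/x86/pat_memtype_list; with debugfs at its
 * usual mount point this is /sys/kernel/debug/x86/pat_memtype_list.
 */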
static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}
late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS */