pat.c

/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
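
/*
 * Worked example (editorial note, not part of the original source): with the
 * PAT() macro above, each of the eight one-byte MSR slots holds one PAT_*
 * value.  Slots 0-3 (mirrored in 4-7) are programmed as WB=6, WC=1, UC-=7,
 * UC=0, so the value written to MSR_IA32_CR_PAT by pat_init() is
 *
 *	PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *	PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC)
 *		= 0x0007010600070106
 */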

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */
struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
	struct rb_node		rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (data->start < start) {
			last_lower = data;
			node = node->rb_right;
		} else if (data->start > start) {
			node = node->rb_left;
		} else
			return data;
	}

	/* Will return NULL if there is no entry with its start <= start */
	return last_lower;
}

static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct memtype *this = container_of(*new, struct memtype, rb);

		parent = *new;
		if (data->start <= this->start)
			new = &((*new)->rb_left);
		else if (data->start > this->start)
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->rb, parent, new);
	rb_insert_color(&data->rb, root);
}

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed(or not even listed) with
		 * different e820 types(RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, new->start);
	if (likely(entry != NULL)) {
		/* To work correctly with list_for_each_entry_continue */
		entry = list_entry(entry->nd.prev, struct memtype, nd);
	} else {
		entry = list_entry(&memtype_list, struct memtype, nd);
	}

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	memtype_rb_insert(&memtype_rbroot, new);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry, *saved_entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = free_ram_pages_type(start, end);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, start);
	if (unlikely(entry == NULL))
		goto unlock_ret;

	/*
	 * Saved entry points to an entry with start same or less than what
	 * we searched for. Now go through the list in both directions to look
	 * for the entry that matches with both start and end, with list stored
	 * in sorted start address
	 */
	saved_entry = entry;
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start > start) {
			break;
		}
	}

	if (!err)
		goto unlock_ret;

	entry = saved_entry;
	list_for_each_entry_reverse(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start < start) {
			break;
		}
	}
unlock_ret:
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
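
#if 0
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical non-RAM reservation pairs reserve_memtype() with free_memtype(),
 * accepting whatever compatible type the tracker hands back.  The function
 * name and the range it operates on are hypothetical.
 */
static int example_reserve_mmio_wc(u64 base, unsigned long size)
{
	unsigned long got;
	int ret;

	ret = reserve_memtype(base, base + size, _PAGE_CACHE_WC, &got);
	if (ret)
		return ret;		/* a conflicting alias is already tracked */

	if (got != _PAGE_CACHE_WC) {
		/* caller insists on write-combining, so back out */
		free_memtype(base, base + size);
		return -EINVAL;
	}

	/* ... map and use the region as write-combining here ... */

	free_memtype(base, base + size);
	return 0;
}
#endif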

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		spin_lock(&memtype_lock);
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		spin_unlock(&memtype_lock);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
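
#if 0
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * io_reserve_memtype() starts from a requested type and may hand back a
 * different but compatible one through *type; the caller releases the range
 * with io_free_memtype() when done.  The function name is hypothetical.
 */
static int example_claim_region(resource_size_t start, resource_size_t end)
{
	unsigned long type = _PAGE_CACHE_UC_MINUS;
	int ret;

	ret = io_reserve_memtype(start, end, &type);
	if (ret)
		return ret;

	/* ... set up the mapping using the possibly-adjusted 'type' ... */

	io_free_memtype(start, end);
	return 0;
}
#endif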

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only and after successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
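
#if 0
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver mmap handler typically runs its vma protection through
 * pgprot_writecombine() before remapping, so the WC request falls back to
 * uncached automatically when PAT is disabled.  The function below is a
 * hypothetical example, not an interface of this file.
 */
static int example_mmap_wc(struct file *file, struct vm_area_struct *vma,
			   unsigned long pfn)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif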

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
			    NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */