/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The p2m table is logically a flat array, but we implement it as a
 * three-level tree to allow the address space to be sparse.
 *
 *                        Xen
 *                         |
 *        p2m_top                   p2m_top_mfn
 *          /  \                       /    \
 *  p2m_mid p2m_mid           p2m_mid_mfn p2m_mid_mfn
 *    /  \    /  \                 /           /
 *  p2m p2m p2m p2m p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
 * maximum representable pseudo-physical address space is:
 *      P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 */
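
/*
 * Worked example (illustrative; assumes a 64-bit build, where
 * P2M_PER_PAGE == P2M_MID_PER_PAGE == P2M_TOP_PER_PAGE == 512):
 * pfn 0x12345 (74565) decomposes as
 *
 *      topidx = 74565 / (512 * 512)  = 0
 *      mididx = (74565 / 512) % 512  = 145
 *      idx    = 74565 % 512          = 325
 *
 * so its mfn lives at p2m_top[0][145][325]; see the p2m_*_index()
 * helpers below.
 */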

#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>

#include <asm/cache.h>
#include <asm/setup.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

static void __init m2p_override_init(void);

unsigned long xen_max_p2m_pfn __read_mostly;

#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))
#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN             (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);

RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

static inline unsigned p2m_top_index(unsigned long pfn)
{
        BUG_ON(pfn >= MAX_P2M_PFN);
        return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
        return pfn % P2M_PER_PAGE;
}
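
/*
 * Each helper below fills a freshly allocated level page with the
 * corresponding "missing" placeholder, so that lookups through
 * unpopulated parts of the tree resolve to INVALID_P2M_ENTRY.
 */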
static void p2m_top_init(unsigned long ***top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing;
}

static void p2m_top_mfn_init(unsigned long *top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_init(unsigned long **mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = p2m_missing;
}

static void p2m_mid_mfn_init(unsigned long *mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = virt_to_mfn(p2m_missing);
}

static void p2m_init(unsigned long *p2m)
{
        unsigned i;

        /* A leaf page holds P2M_PER_PAGE mfn entries */
        for (i = 0; i < P2M_PER_PAGE; i++)
                p2m[i] = INVALID_P2M_ENTRY;
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called very early, and must use extend_brk()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void xen_build_mfn_list_list(void)
{
        unsigned long pfn;

        /* Pre-initialize p2m_top_mfn to be completely missing */
        if (p2m_top_mfn == NULL) {
                p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_mid_mfn_init(p2m_mid_missing_mfn);

                p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_p_init(p2m_top_mfn_p);

                p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_init(p2m_top_mfn);
        } else {
                /* Reinitialise: mfns all change after migration */
                p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }

        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);
                unsigned long **mid;
                unsigned long *mid_mfn_p;

                mid = p2m_top[topidx];
                mid_mfn_p = p2m_top_mfn_p[topidx];

                /* Don't bother allocating any mfn mid levels if
                 * they're just missing; just update the stored mfn,
                 * since all of them could have changed over a migrate.
                 */
                if (mid == p2m_mid_missing) {
                        BUG_ON(mididx);
                        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
                        p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
                        pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
                        continue;
                }

                if (mid_mfn_p == p2m_mid_missing_mfn) {
                        /*
                         * XXX boot-time only!  We should never find
                         * missing parts of the mfn tree after
                         * runtime.  extend_brk() will BUG if we call
                         * it too late.
                         */
                        mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_mfn_init(mid_mfn_p);

                        p2m_top_mfn_p[topidx] = mid_mfn_p;
                }

                p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
                mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
}
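
/*
 * Publish the root of the mfn tree via shared_info; Xen and the
 * toolstack walk this list-of-lists to translate our pfns, e.g.
 * during save/restore.
 */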
void xen_setup_mfn_list_list(void)
{
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn);
        HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned long pfn;

        xen_max_p2m_pfn = max_pfn;

        p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_init(p2m_missing);

        p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_mid_init(p2m_mid_missing);

        p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_top_init(p2m_top);

        /*
         * The domain builder gives us a pre-constructed p2m array in
         * mfn_list for all the pages initially given to us, so we just
         * need to graft that into our tree structure.
         */
        for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);

                if (p2m_top[topidx] == p2m_mid_missing) {
                        unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_init(mid);

                        p2m_top[topidx] = mid;
                }

                /*
                 * As long as the mfn_list has enough entries to completely
                 * fill a p2m page, pointing into the array is ok.  But if
                 * not, the entries beyond the last pfn will be undefined.
                 */
                if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
                        unsigned long p2midx;

                        p2midx = max_pfn % P2M_PER_PAGE;
                        for ( ; p2midx < P2M_PER_PAGE; p2midx++)
                                mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
                }
                p2m_top[topidx][mididx] = &mfn_list[pfn];
        }

        m2p_override_init();
}
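
/*
 * Look up the mfn for a pfn.  Unpopulated parts of the tree resolve
 * through the "missing" placeholder pages to INVALID_P2M_ENTRY.
 */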
unsigned long get_phys_to_machine(unsigned long pfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void *alloc_p2m_page(void)
{
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void free_p2m_page(void *p)
{
        free_page((unsigned long)p);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
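
/*
 * Losing such a cmpxchg race is harmless: every contender installs a
 * page initialized entirely to "missing" entries, so the copies are
 * interchangeable until a real entry is written.
 */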

static bool alloc_p2m(unsigned long pfn)
{
        unsigned topidx, mididx;
        unsigned long ***top_p, **mid;
        unsigned long *top_mfn_p, *mid_mfn;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);

        top_p = &p2m_top[topidx];
        mid = *top_p;

        if (mid == p2m_mid_missing) {
                /* Mid level is missing, allocate a new one */
                mid = alloc_p2m_page();
                if (!mid)
                        return false;

                p2m_mid_init(mid);

                if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
                        free_p2m_page(mid);
        }

        top_mfn_p = &p2m_top_mfn[topidx];
        mid_mfn = p2m_top_mfn_p[topidx];

        BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

        if (mid_mfn == p2m_mid_missing_mfn) {
                /* Separately check the mid mfn level */
                unsigned long missing_mfn;
                unsigned long mid_mfn_mfn;

                mid_mfn = alloc_p2m_page();
                if (!mid_mfn)
                        return false;

                p2m_mid_mfn_init(mid_mfn);

                missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
                mid_mfn_mfn = virt_to_mfn(mid_mfn);
                if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
                        free_p2m_page(mid_mfn);
                else
                        p2m_top_mfn_p[topidx] = mid_mfn;
        }

        if (p2m_top[topidx][mididx] == p2m_missing) {
                /* p2m leaf page is missing */
                unsigned long *p2m;

                p2m = alloc_p2m_page();
                if (!p2m)
                        return false;

                p2m_init(p2m);

                if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
                        free_p2m_page(p2m);
                else
                        mid_mfn[mididx] = virt_to_mfn(p2m);
        }

        return true;
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        if (p2m_top[topidx][mididx] == p2m_missing)
                return mfn == INVALID_P2M_ENTRY;

        p2m_top[topidx][mididx][idx] = mfn;

        return true;
}
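
/*
 * Install a p2m mapping, allocating any missing intermediate levels
 * on demand.  (Illustrative caller: the balloon driver repopulating
 * a previously released pfn with a fresh mfn.)
 */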
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
                return true;
        }

        if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
                if (!alloc_p2m(pfn))
                        return false;

                if (!__set_phys_to_machine(pfn, mfn))
                        return false;
        }

        return true;
}
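
/*
 * M2P overrides: when a foreign frame (e.g. one grant-mapped from
 * another domain) is mapped over one of our pages, Xen's m2p table
 * still refers to the owning domain.  The hash table below records a
 * local struct page for such an mfn so that reverse (m2p) lookups can
 * be fixed up locally; see FOREIGN_FRAME() in m2p_add_override().
 */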

#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH       (1 << M2P_OVERRIDE_HASH_SHIFT)

static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
static DEFINE_SPINLOCK(m2p_override_lock);

static void __init m2p_override_init(void)
{
        unsigned i;

        m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
                                   sizeof(unsigned long));

        for (i = 0; i < M2P_OVERRIDE_HASH; i++)
                INIT_LIST_HEAD(&m2p_overrides[i]);
}

static unsigned long mfn_hash(unsigned long mfn)
{
        return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}

/* Add an MFN override for a particular page */
int m2p_add_override(unsigned long mfn, struct page *page)
{
        unsigned long flags;
        unsigned long pfn;
        unsigned long address;
        unsigned level;
        pte_t *ptep = NULL;

        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
                address = (unsigned long)__va(pfn << PAGE_SHIFT);
                ptep = lookup_address(address, &level);

                if (WARN(ptep == NULL || level != PG_LEVEL_4K,
                         "m2p_add_override: pfn %lx not mapped", pfn))
                        return -EINVAL;
        }

        page->private = mfn;
        page->index = pfn_to_mfn(pfn);

        __set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
        if (!PageHighMem(page))
                /* Just zap old mapping for now */
                pte_clear(&init_mm, address, ptep);

        spin_lock_irqsave(&m2p_override_lock, flags);
        list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
        spin_unlock_irqrestore(&m2p_override_lock, flags);

        return 0;
}
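
/* Undo m2p_add_override: restore the p2m entry and the kernel mapping */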
int m2p_remove_override(struct page *page)
{
        unsigned long flags;
        unsigned long mfn;
        unsigned long pfn;
        unsigned long address;
        unsigned level;
        pte_t *ptep = NULL;

        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
        if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
                return -EINVAL;

        if (!PageHighMem(page)) {
                address = (unsigned long)__va(pfn << PAGE_SHIFT);
                ptep = lookup_address(address, &level);

                if (WARN(ptep == NULL || level != PG_LEVEL_4K,
                         "m2p_remove_override: pfn %lx not mapped", pfn))
                        return -EINVAL;
        }

        spin_lock_irqsave(&m2p_override_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
        __set_phys_to_machine(pfn, page->index);

        if (!PageHighMem(page))
                set_pte_at(&init_mm, address, ptep,
                           pfn_pte(pfn, PAGE_KERNEL));
                /* No tlb flush necessary because the caller already
                 * left the pte unmapped. */

        return 0;
}
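
/* Find the page (if any) currently overriding this mfn */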
struct page *m2p_find_override(unsigned long mfn)
{
        unsigned long flags;
        struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
        struct page *p, *ret;

        ret = NULL;

        spin_lock_irqsave(&m2p_override_lock, flags);

        list_for_each_entry(p, bucket, lru) {
                if (p->private == mfn) {
                        ret = p;
                        break;
                }
        }

        spin_unlock_irqrestore(&m2p_override_lock, flags);

        return ret;
}
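
/*
 * As m2p_find_override(), but return the overriding pfn, falling back
 * to the given pfn when no override exists.
 */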
unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
        struct page *p = m2p_find_override(mfn);
        unsigned long ret = pfn;

        if (p)
                ret = page_to_pfn(p);

        return ret;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);