pgtable.c
/*
 *  Copyright IBM Corp. 2007, 2011
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#define FRAG_MASK       0x0f
#else
#define ALLOC_ORDER     2
#define FRAG_MASK       0x03
#endif
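
/*
 * Note: ALLOC_ORDER is the page allocation order for the region/segment
 * ("crst") tables - order 2 (four pages, 16KB) on 64-bit, order 1 (two
 * pages, 8KB) on 31-bit. FRAG_MASK is the bitmap of page table fragments
 * in one 4KB page: four 1KB fragments (0x0f) on 31-bit, two 2KB fragments
 * (0x03) on 64-bit. The per-fragment allocation bits are kept in
 * page->_mapcount, see page_table_alloc() and page_table_free() below.
 */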

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
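
/*
 * For pgste page tables, page->index of the page table page points to a
 * struct gmap_pgtable (see page_table_alloc_pgste() below) that records the
 * parent address the table maps and, in ->mapper, the list of gmap segment
 * table entries currently pointing at it. gmap_unlink_segment() removes one
 * such back-link and makes the gmap segment entry invalid again.
 */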

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        spin_unlock(&gmap->mm->page_table_lock);
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        spin_lock(&gmap->mm->page_table_lock);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        return 0;
}
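
/*
 * The gmap page table walks below index into a 4-level tree rooted at
 * gmap->table (a region-1 table): bits 63..53 of the guest address select
 * the region-1 entry, bits 52..42 the region-2 entry, bits 41..31 the
 * region-3 entry and bits 30..20 the segment entry - hence the shifts by
 * 53, 42, 31 and 20 and the 11-bit index mask 0x7ff.
 */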

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                spin_lock(&mm->page_table_lock);
                list_add(&rmap->list, &mp->mapper);
                spin_unlock(&mm->page_table_lock);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
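
/*
 * gmap_fault() is the mmap_sem-taking wrapper around __gmap_fault(). Both
 * return the parent (host) address that backs the given guest address, or a
 * negative error code (-EFAULT/-ENOMEM) cast to unsigned long if the guest
 * address is not mapped or memory is exhausted.
 */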
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_fault(address, gmap);
        up_read(&gmap->mm->mmap_sem);

        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
        unsigned long *table, address, size;
        struct vm_area_struct *vma;
        struct gmap_pgtable *mp;
        struct page *page;

        down_read(&gmap->mm->mmap_sem);
        address = from;
        while (address < to) {
                /* Walk the gmap address space page table */
                table = gmap->table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                page = pfn_to_page(*table >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                vma = find_vma(gmap->mm, mp->vmaddr);
                size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
                zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
                               size, NULL);
                address = (address + PMD_SIZE) & PMD_MASK;
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}
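
/*
 * A pgste page table occupies a whole 4KB page: the lower 2KB hold the 256
 * page table entries and the upper 2KB hold the page status table entries
 * (pgstes) used by KVM - hence the two clear_table() calls over half a page
 * each in page_table_alloc_pgste() below.
 */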
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
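
/*
 * atomic_xor_bits() toggles the given bits in an atomic_t with a
 * compare-and-swap loop and returns the new value. It is used to claim and
 * release the page table fragment bits kept in page->_mapcount.
 */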
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}
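
/*
 * page->_mapcount doubles as the fragment bitmap for these pages: the low
 * FRAG_MASK bits mark fragments handed out by page_table_alloc(), while the
 * next four bits (bit << 4, set in page_table_free_rcu()) mark fragments
 * still waiting for an RCU grace period before they may be reused.
 */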

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}
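
/*
 * The table pointer handed to tlb_remove_table() carries the fragment type
 * in its low bits (page tables are at least 1KB aligned, so those bits are
 * otherwise zero): FRAG_MASK marks a pgste page table, bit << 4 marks an
 * ordinary fragment whose pending bit must be cleared after the RCU grace
 * period. __tlb_remove_table() below decodes this again.
 */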

void __tlb_remove_table(void *_table)
{
        const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the current userspace process (needed for kvm).
 * Page tables with pgstes are set up at allocation time (see
 * page_table_alloc), so instead of converting the existing mm in place we
 * duplicate it with alloc_pgste set and swap the copy in.
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* Let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* We copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* OK, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        /*
         * Load Real Address (lra) sets condition code 0 if the address can
         * be translated, i.e. the page is mapped; any other condition code
         * means it is not present.
         */
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */