pgtable.c

/*
 * Copyright IBM Corp. 2007,2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
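
/*
 * ALLOC_ORDER is the allocation order of the region/segment (crst)
 * tables: two pages on 31 bit, four pages on 64 bit. FRAG_MASK tracks
 * which fragments of a 4K page are in use as page tables: four 1K
 * tables per page on 31 bit, two 2K tables per page on 64 bit.
 */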
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);
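
/*
 * Allocate/free a crst (region or segment) table of 1 << ALLOC_ORDER
 * pages for the upper levels of a process page table.
 */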
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
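/*
 * Grow the page table hierarchy until the address space covers at
 * least "limit" bytes: allocate a new top-level region table, hook
 * the old top level in below it and switch the ASCE via update_mm().
 */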
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}
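
/*
 * Shrink the page table hierarchy until the address space is no larger
 * than "limit": remove and free top-level region tables one by one and
 * switch the ASCE via update_mm().
 */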
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
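
/*
 * Remove the rmap that links the segment table entry at "table" to its
 * page table and invalidate the entry again. Returns 1 if the entry
 * was valid and a TLB flush is required, 0 if it was already invalid.
 */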
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                spin_lock(&mm->page_table_lock);
                list_add(&rmap->list, &mp->mapper);
                spin_unlock(&mm->page_table_lock);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
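
/**
 * gmap_fault - resolve a guest address to a parent mm address
 * @address: guest address
 * @gmap: pointer to the guest address space structure
 *
 * Takes mmap_sem of the parent mm and calls __gmap_fault(). Returns
 * the corresponding user space address, or -EFAULT/-ENOMEM on failure.
 */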
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_fault(address, gmap);
        up_read(&gmap->mm->mmap_sem);

        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
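
/**
 * gmap_discard - discard the backing of a guest address range
 * @from: start guest address
 * @to: end guest address
 * @gmap: pointer to the guest address space structure
 *
 * Walks the guest tables segment by segment and zaps the mapped pages
 * in the parent address space with zap_page_range().
 */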
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
        unsigned long *table, address, size;
        struct vm_area_struct *vma;
        struct gmap_pgtable *mp;
        struct page *page;

        down_read(&gmap->mm->mmap_sem);
        address = from;
        while (address < to) {
                /* Walk the gmap address space page table */
                table = gmap->table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                page = pfn_to_page(*table >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                vma = find_vma(gmap->mm, mp->vmaddr);
                size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
                zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
                               size, NULL);
                address = (address + PMD_SIZE) & PMD_MASK;
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
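
/*
 * Notifier for the removal of a page table from the parent mm: detach
 * all gmap segment table entries that point to "table", make them
 * invalid again and flush the TLB if any entry was changed.
 */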
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}
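
/*
 * Allocate a full 4K page table with page status extensions (pgstes)
 * in the upper half, together with the gmap_pgtable descriptor that
 * records the user space address and the rmap list of gmap segment
 * table entries pointing to it.
 */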
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        /* Undo the pgtable_page_ctor() done in page_table_alloc_pgste(). */
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
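
/*
 * Atomically toggle the given bits in *v and return the new value.
 * Used to flip the page table fragment bits in page->_mapcount.
 */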
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
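
/*
 * The lower FRAG_MASK bits of page->_mapcount track which fragments of
 * a page are in use as page tables; the same bits shifted left by four
 * mark fragments that still wait for their grace period in
 * page_table_free_rcu().
 */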
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}
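
/*
 * RCU based freeing of page tables: the fragment bit (shifted left by
 * four) or, for a pgste page table, FRAG_MASK is encoded into the low
 * bits of the table address handed to tlb_remove_table();
 * __tlb_remove_table() decodes it again once the table may be freed.
 * crst tables carry no type bits and are freed directly.
 */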
#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
        const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for the user space process (needed for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie. */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done. */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* Let's check if we are allowed to replace the mm. */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* Copy the mm and let dup_mm() create the page tables with pgstes. */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened. */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* OK, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
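/*
 * Check whether the (identity mapped) page is currently mapped by
 * executing a load real address (lra) on its address; condition code 0
 * means the translation succeeded, i.e. the page is present.
 */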
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */