/*
 *  pgtable.c
 *
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

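/*
 * Allocation order of the region/segment (crst) tables and the fragment
 * mask used for sub-page page table allocations: on 64 bit a crst table
 * spans four pages (16K) and a page table uses a 2K fragment, so a 4K
 * page holds two fragments (FRAG_MASK 0x03); on 31 bit a crst table
 * spans two pages (8K) and a page table uses a 1K fragment, four per
 * page (FRAG_MASK 0x0f).
 */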
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

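/*
 * Start of the vmalloc area. The default size can be overridden with
 * the "vmalloc=<size>" kernel parameter, which moves VMALLOC_START
 * further down from the fixed VMALLOC_END.
 */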
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

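/*
 * Allocate and free region/segment (crst) tables. A crst table spans
 * 1 << ALLOC_ORDER pages and is handed out as the physical address of
 * its first page.
 */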
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
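/*
 * crst_table_upgrade - grow the address space of a process
 *
 * Adds region tables on top of the current page table hierarchy until
 * the address space limit (asce_limit) is at least "limit": a 31 bit
 * address space (2 GB) is upgraded to a region-third table (1UL << 42)
 * and, if that is still not enough, to a region-second table
 * (1UL << 53).
 */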
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

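/*
 * crst_table_downgrade - shrink the address space of a process
 *
 * Removes the topmost region tables until asce_limit is no larger than
 * "limit" and frees them; the TLB is flushed before the tables are
 * unlinked.
 */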
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE
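/*
 * Guest address space (gmap) support: a gmap provides a second, guest
 * visible address space that reuses the page tables of the parent
 * mm_struct. A gmap segment entry either points to a parent page table
 * (valid) or is invalid and carries the parent virtual address of the
 * backing segment together with _SEGMENT_ENTRY_RO.
 */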
/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

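/*
 * gmap_unlink_segment - remove a single gmap segment table entry
 *
 * Drops the rmap structure that links the gmap entry to the parent page
 * table and resets the entry to the invalid, read-only form that still
 * carries the parent address. Returns 1 if a valid entry was unlinked
 * (a TLB flush is needed), 0 otherwise.
 */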
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

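/*
 * Flush all TLB entries created through the gmap ASCE, using IDTE if the
 * machine supports it and a global flush otherwise.
 */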
static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        /* Load primary space page table origin. */
        S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                                 _ASCE_USER_BITS | __pa(gmap->table);
        asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        /* Load primary space page table origin. */
        S390_lowcore.user_asce =
                gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
        asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

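/*
 * gmap_alloc_table - add a lower level crst table below a gmap entry
 *
 * Allocates and initializes a new crst table and links it into the still
 * invalid region entry at @table; if the entry was filled in the
 * meantime the new table is freed again. Returns 0 on success, -ENOMEM
 * otherwise.
 */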
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        down_read(&gmap->mm->mmap_sem);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

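/*
 * gmap_fault - resolve a guest address to a parent address
 *
 * Walks the gmap tables for @address and returns the corresponding
 * address in the parent address space. If the segment is only mapped
 * (invalid entry carrying the parent address), the parent page table is
 * allocated if necessary and linked into the gmap entry. This function
 * is assumed to be called with the parent mmap_sem held, since find_vma
 * and the page table allocations require it. Returns -EFAULT or -ENOMEM
 * on failure.
 */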
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                list_add(&rmap->list, &mp->mapper);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

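/*
 * gmap_unmap_notifier - called when a parent page table is going away
 *
 * Invalidates every gmap segment entry that points to @table, removes
 * the corresponding rmap structures and does a global TLB flush if any
 * entry was changed.
 */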
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

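/*
 * Allocate a full 4K page for a page table with extended pgstes: the
 * first 2K hold the 256 pte entries, the second 2K the pgste entries
 * used for KVM guests. The gmap_pgtable descriptor is stored in
 * page->index and the fragment bits in page->_mapcount are marked as
 * fully used so the page is never split into fragments.
 */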
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

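/*
 * Atomically toggle the given bits in an atomic_t and return the new
 * value; used to manipulate the fragment bitmap kept in page->_mapcount.
 */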
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
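/*
 * A user page table needs only a fraction of a 4K page (2K on 64 bit,
 * 1K on 31 bit), so partially used pages are kept on
 * mm->context.pgtable_list and carved up into fragments. The low bits
 * of page->_mapcount record which fragments are in use; the bits four
 * positions higher mark fragments that still wait for an RCU grace
 * period before they may be reused (see page_table_free_rcu below).
 */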
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

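/*
 * page_table_free - release a page table fragment immediately
 *
 * Clears the fragment bit in page->_mapcount, keeps the page on the
 * pgtable_list while other fragments are still in use and frees the
 * whole page once the last fragment is gone. Intended for tables that
 * are no longer reachable by concurrent walkers; the RCU variant below
 * is used on the mmu_gather unmap path.
 */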
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
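/*
 * RCU based page table freeing: page_table_free_rcu marks the fragment
 * as pending (bit << 4 in page->_mapcount) and hands the table to
 * tlb_remove_table with the pending bit encoded in the low bits of the
 * pointer. After the grace period __tlb_remove_table decodes those bits
 * and either finishes the fragment free, frees a pgste page table
 * (type == FRAG_MASK) or frees a crst table (type == 0).
 */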
static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long) _table & PAGE_MASK);
        unsigned type = (unsigned long) _table & ~PAGE_MASK;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for the userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* Let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* We copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
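/*
 * kernel_page_present - check whether a kernel page is mapped
 *
 * Uses the lra (load real address) instruction and inspects its
 * condition code: cc 0 means the address translates, i.e. the page is
 * currently mapped in the kernel address space.
 */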
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                " lra %1,0(%1)\n"
                " ipm %0\n"
                " srl %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */