/*
 * arch/s390/mm/pgtable.c
 *
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
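
/*
 * On 64-bit a crst (region/segment) table occupies four pages (16KB,
 * order 2) and a 4KB page holds two 2KB page table fragments, tracked
 * by the two bits of FRAG_MASK. On 31-bit the segment table is 8KB
 * (order 1) and a page holds four 1KB fragments (FRAG_MASK 0x0f).
 */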

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);
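
/*
 * Example: booting with "vmalloc=512M" on the kernel command line
 * reserves 512MB below VMALLOC_END; memparse() accepts the usual
 * K/M/G suffixes.
 */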

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}
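
/*
 * The new top level is allocated outside of page_table_lock and the
 * old top table is hooked in underneath it, so an upgrade never has
 * to copy existing translations; a racing upgrader simply frees its
 * unused table and retries.
 */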

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif
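
/*
 * A minimal sketch of the intended use (the call sites live outside
 * this file): growing the address space before servicing a large
 * mapping request, and shrinking it again when a 31-bit compat
 * binary is execed.
 *
 *	crst_table_upgrade(mm, 1UL << 53);	// up to four levels
 *	crst_table_downgrade(mm, 1UL << 31);	// back to 2GB
 */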

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
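
/*
 * A minimal lifecycle sketch, assuming a KVM-style hypervisor caller
 * (the error handling shown is illustrative):
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	// ... gmap_map_segment(), guest execution ...
 *	gmap_free(gmap);
 */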

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
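
/*
 * Guest execution is bracketed by these calls; a sketch, assuming a
 * KVM-style vcpu loop (the field and entry routine names are
 * illustrative):
 *
 *	gmap_enable(vcpu->arch.gmap);
 *	rc = run_guest(vcpu);
 *	gmap_disable(vcpu->arch.gmap);
 *
 * The low-level fault handler picks up S390_lowcore.gmap to resolve
 * faults against the right guest address space.
 */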

static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        down_read(&gmap->mm->mmap_sem);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                         (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
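
/*
 * Example (sketch): backing the first 16MB of guest real memory with
 * parent memory at 'uaddr' (an illustrative, suitably aligned value):
 *
 *	rc = gmap_map_segment(gmap, uaddr, 0x0, 16UL << 20);
 *
 * All of from, to and len must be multiples of PMD_SIZE, the 1MB
 * segment size on s390.
 */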

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;

        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                list_add(&rmap->list, &mp->mapper);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);
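
/*
 * The shift/mask pairs above implement the guest table walk: bits
 * >> 53 index the region-1 table, >> 42 the region-2 table, >> 31
 * the region-3 table and >> 20 the segment table, each masked to
 * the 2048 entries (0x7ff) of a crst table.
 */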

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}
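
/*
 * With pgstes a page table always consumes a whole 4K page: the lower
 * 2K hold the 256 ptes, the upper 2K the pgste area used by KVM. The
 * _mapcount value of 3 marks both 2K fragments as allocated, so the
 * fragment allocator below never hands out the other half.
 */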

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}
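
/*
 * Example: with _mapcount == 0x01, atomic_xor_bits(v, 0x02) returns
 * 0x03 (second fragment claimed); the same call on the result gives
 * back 0x01 (fragment released). The cmpxchg loop retries if another
 * cpu modified the counter in between.
 */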

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);

        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}
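
/*
 * The low nibble of _mapcount tracks fragments in use, the high
 * nibble fragments queued for RCU free; "mask | (mask >> 4)" above
 * folds the pending bits in, so a fragment is never reused before
 * its grace period has expired.
 */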

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }

        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);

        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}
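
/*
 * The kind of table is encoded in the low bits of the address passed
 * to tlb_remove_table(): 0 for a full crst table, FRAG_MASK for a
 * pgste page table, a single high-nibble bit for a 1K/2K fragment.
 * __tlb_remove_table() decodes this after the grace period.
 */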

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long) _table & PAGE_MASK);
        unsigned type = (unsigned long) _table & ~PAGE_MASK;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* Let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* We copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* Ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
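
/*
 * A sketch of the caller side: a KVM-style hypervisor invokes this
 * once per VM, while the creating process is still single threaded
 * (which the mm_users check above enforces); error handling is
 * illustrative:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 */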

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
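        /*
         * LRA (load real address) sets condition code 0 only if the
         * address is translatable; ipm/srl extract that cc into a
         * register, so cc == 0 means the page is mapped.
         */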
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */