/*
 * arch/s390/mm/pgtable.c
 *
 * Copyright IBM Corp. 2007,2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
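/*
 * A region/segment (crst) table spans four pages on 64 bit (2048 eight
 * byte entries) and two pages on 31 bit. Page tables are smaller than a
 * page: 2K (256 entries of 8 bytes) on 64 bit, 1K on 31 bit, so several
 * of them are packed into one 4K page; FRAG_MASK covers the per-fragment
 * allocation bits kept in the low nibble of page->_mapcount.
 */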
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

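/*
 * Allocate/free a full crst table. The table is passed around as a
 * physical address (page_to_phys); with the kernel's 1:1 mapping on
 * s390 this can be used directly as a pointer.
 */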
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT

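/*
 * crst_table_upgrade - grow the address space by adding region table levels
 *
 * Puts new region tables on top of the current top-level table until the
 * address space covers @limit: 2G (segment table) -> 4T (region-third) ->
 * 8P (region-second). The swap is done under page_table_lock; if another
 * thread upgraded concurrently, the freshly allocated table is freed and
 * the check is repeated.
 */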
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

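/*
 * crst_table_downgrade - remove region table levels again
 *
 * Pops top-level tables off the mm until the address space limit is at
 * or below @limit, freeing each removed table. The TLB is flushed up
 * front since the address-space-control element changes.
 */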
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

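/*
 * The gmap code below maintains a second, "guest" address space per mm
 * for KVM: a private set of region/segment tables whose segment entries
 * either carry the host virtual address of the mapped area (while still
 * invalid) or point directly at a host page table once resolved by
 * __gmap_fault. gmap_rmap structures remember which gmap segment entries
 * reference a given host page table so they can be unlinked again when
 * the host side goes away.
 */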
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

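/*
 * Remove the reverse-map entry that ties a gmap segment table entry to
 * a host page table, and put the entry back into its unresolved state
 * (invalid + read-only + host vm address). Returns 1 if an entry was
 * unlinked and a TLB flush is needed, 0 otherwise.
 */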
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

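/*
 * Flush guest TLB entries: by individual ASCE if the machine has the
 * IDTE facility, otherwise with a global flush.
 */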
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

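/*
 * A minimal usage sketch of the exported gmap interface. This is a
 * hypothetical caller, not taken from an in-tree user; KVM is the
 * intended one:
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, from, to, len);
 *	gmap_enable(gmap);
 *	...
 *	addr = gmap_fault(guest_addr, gmap);	resolve on demand
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */
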
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			 (*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);
		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);
		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

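/*
 * __gmap_fault - resolve a fault on a guest address
 * @address: faulting address in the guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Walks the gmap tables to the segment entry for @address. If the
 * entry already points at a host page table, the corresponding host
 * address is returned. If it is still in its unresolved state, the
 * host page table is looked up (allocating intermediate levels if
 * needed), a gmap_rmap link is recorded, and the entry is made to
 * point at the page table. Returns -EFAULT (as an unsigned long) if
 * no mapping exists.
 */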
/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}

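/*
 * gmap_fault - wrapper around __gmap_fault that takes mmap_sem itself
 */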
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

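/*
 * gmap_discard - zap the host pages backing a guest address range
 *
 * For every resolved segment in [from, to) the backing host pages are
 * unmapped with zap_page_range(); unresolved segments are skipped.
 */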
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

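/*
 * Called when a host page table goes away: invalidate every gmap
 * segment entry that still points at @table and flush the TLB if
 * anything was unlinked.
 */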
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

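/*
 * A pgste page table occupies a full 4K page: the lower 2K hold the
 * page table entries, the upper 2K the matching page status table
 * extensions (pgstes). page->_mapcount is set to 3 (== FRAG_MASK on
 * 64 bit) to mark both fragments as used, and page->index carries the
 * gmap_pgtable descriptor with the host vm address and the rmap list.
 */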
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

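/*
 * Toggle the given fragment bits in page->_mapcount with a lock-free
 * cmpxchg loop and return the new value.
 */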
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

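/*
 * Release one 1K/2K fragment. The page is put back on (or taken off)
 * the mm's pgtable_list depending on whether free fragments remain;
 * once all fragment bits are clear the page itself is freed.
 */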
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

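/*
 * RCU-deferred freeing: page_table_free_rcu() marks the fragment as
 * pending by setting its bit in the upper nibble of page->_mapcount
 * and encodes the type bits into the low bits of the (page-aligned)
 * table address handed to tlb_remove_table(). __tlb_remove_table()
 * decodes them again after the grace period; a type of FRAG_MASK
 * denotes a pgste page table, a type of 0 a full crst table.
 */
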
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

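/*
 * RCU callback: strip the type bits from the encoded table pointer and
 * free either a page table fragment (type != 0) or a whole crst table.
 */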
void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for the caller's userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
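/*
 * kernel_page_present - check whether a kernel page is mapped
 *
 * Probes the page's address with the LRA (load real address)
 * instruction; a condition code of 0 means the translation succeeded,
 * i.e. the page is present in the kernel mapping.
 */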
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC && CONFIG_HIBERNATION */