pgtable.c

/*
 *  Copyright IBM Corp. 2007, 2011
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
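
/*
 * Rough guide to the constants above (editorial sketch): CRST
 * (region/segment) tables occupy 2^ALLOC_ORDER pages - 16K on 64-bit,
 * 8K on 31-bit. Page tables are smaller than a 4K page (2K of ptes on
 * 64-bit, 1K on 31-bit), so a page is carved into 2 or 4 fragments;
 * FRAG_MASK is the matching allocation bitmap kept in the low bits of
 * page->_mapcount (0x03 for two 2K fragments, 0x0f for four 1K ones).
 */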

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif
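
/*
 * Caller sketch (assumed, not part of this file): the mmap path can
 * grow the address space when a mapping would exceed the current
 * limit, e.g. something like
 *
 *	if (addr + len > mm->context.asce_limit)
 *		rc = crst_table_upgrade(mm, 1UL << 53);
 *
 * while a compat-exec path may call crst_table_downgrade() to shrink
 * a 64-bit address space for a 31-bit task.
 */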

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
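
/*
 * Lifecycle sketch (assumed consumer such as a KVM-like module, not
 * part of this file):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	gmap_enable(gmap);	// make it the active guest space
 *	...			// run the guest, service gmap_fault()
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */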

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
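
/*
 * Usage sketch (assumed caller, not part of this file): map the first
 * 16MB of the parent address space at guest address 0, then tear the
 * mapping down again. All values must be 1MB segment aligned:
 *
 *	if (gmap_map_segment(gmap, 0x0UL, 0x0UL, 0x1000000UL))
 *		goto fail;
 *	...
 *	gmap_unmap_segment(gmap, 0x0UL, 0x1000000UL);
 */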

static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}
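
/*
 * Note on the walk above: each table level holds 2048 entries, so 11
 * address bits are consumed per level. The shifts pick out the
 * region-first (bits 63..53), region-second (52..42), region-third
 * (41..31) and segment (30..20) index, leaving the 20-bit byte offset
 * within the 1MB segment.
 */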

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INV)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INV)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_RO))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= RCP_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}
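
/*
 * A note on the offset arithmetic above: the pte pointer's byte offset
 * within its table (pte index * sizeof(pte_t)) is scaled by
 * 4096 / sizeof(pte_t), giving pte index * PAGE_SIZE, which recovers
 * the byte offset of the page inside the 1MB segment that
 * rmap->vmaddr points at.
 */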

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
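
/*
 * Bookkeeping sketch for the allocator above: the low nibble of
 * page->_mapcount is the allocation bitmap (one bit per 1K/2K
 * fragment), the second nibble holds the bits of fragments queued for
 * RCU free by page_table_free_rcu(). The "mask | (mask >> 4)" fold
 * treats pending-free fragments as busy, and once
 * (mask & FRAG_MASK) == FRAG_MASK the page is taken off pgtable_list
 * because it has no free fragment left.
 */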

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}
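
/*
 * Summary of the tagging scheme used above: page tables are 1K/2K
 * aligned, so the low bits of a table address are always zero and can
 * carry a type tag. page_table_free_rcu() stores the fragment bit
 * shifted into the second nibble (or FRAG_MASK for a full pgste
 * page); a tag of zero denotes a CRST table, which goes straight back
 * to the page allocator with free_pages().
 */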

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);
	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		__tlb_flush_mm(tlb->mm);
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
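
/*
 * The overall flow, as implemented above: tlb_remove_table() queues
 * tagged table pointers in a per-gather batch; tlb_table_flush()
 * flushes the TLB and hands the full batch to call_rcu_sched(), so
 * the actual __tlb_remove_table() frees only run after a grace
 * period - except in the allocation-failure path, where an IPI
 * broadcast stands in for the grace period.
 */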

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;
	struct page *page;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		page = follow_page(vma, addr, FOLL_SPLIT);
	}
}

void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		vma = vma->vm_next;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* we copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	/* make sure that both mms have a correct rss state */
	sync_mm_rss(tsk->mm);
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	mm->def_flags |= VM_NOHUGEPAGE;
#endif

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
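
/*
 * Usage sketch (assumed caller, not part of this file): a hypervisor
 * module would typically call this once while creating a virtual
 * machine, before any vcpu runs, e.g.
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *
 * From then on every page table of the process carries pgstes, and
 * the gmap API above can be used for the guest address space.
 */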

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	ptep++;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */