/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
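
/*
 * Region/segment (crst) tables span (1 << ALLOC_ORDER) pages; the table
 * origin returned by page_to_phys() is used directly as the table pointer.
 */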
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		update_mm(mm, current);
	__tlb_flush_local();
}
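
/*
 * Grow the address space of @mm to at least @limit by stacking additional
 * region tables on top of the current page table tree. Each pass through
 * the repeat loop adds one level; asce_limit and asce_bits are updated and
 * every CPU currently running this mm is switched to the new top table.
 */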
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}
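
/*
 * Shrink the address space of @mm back to @limit by removing top-level
 * region tables that are no longer needed and reloading the resulting
 * ASCE on the current CPU.
 */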
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm)
		__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);
		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);
		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
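
/*
 * Walk the four levels of the gmap page table for a guest address and
 * return a pointer to the segment table entry, or ERR_PTR(-EFAULT) if
 * one of the region table entries on the way is invalid.
 */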
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
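
/*
 * Resolve an invalid but protected gmap segment entry: find the vma for
 * the stored parent address, allocate the missing parent page table
 * levels if necessary and link the gmap segment entry to the parent
 * page table through a gmap_rmap structure.
 */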
static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}
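
/*
 * Detach a process page table from all gmaps that map it: restore every
 * linked gmap segment entry to its invalid/protected state and flush the
 * TLB if anything was changed.
 */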
static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
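
/*
 * gmap_discard - discard the backing of a guest address range
 * @from: first guest address
 * @to: address after the last guest address of the range
 * @gmap: pointer to the guest mapping meta data structure
 *
 * For each mapped segment in the range the corresponding pages in the
 * parent address space are zapped with zap_page_range().
 */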
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}
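
/*
 * Set the guest storage key (ACC, FP, referenced and changed bits) for a
 * page at @addr: the key is merged into the pgste and, if the page is
 * currently mapped, the access bits are also written to the storage key
 * of the backing page.
 */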
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
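
/*
 * A page table needs only 1K/2K, so each 4K page is split into fragments.
 * The page's _mapcount is used as a bitmask of allocated fragments and
 * pages with free fragments stay on mm->context.pgtable_list. Page tables
 * with pgstes (for KVM guests) always use a full 4K page: 2K of ptes
 * followed by 2K of pgstes.
 */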
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
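
/*
 * RCU-deferred freeing of page tables: page_table_free_rcu() encodes the
 * fragment being released (or FRAG_MASK for a full pgste page table) in
 * the low bits of the table address before handing it to
 * tlb_remove_table(); __tlb_remove_table() decodes those bits after the
 * grace period and frees the fragment, pgste table or crst table.
 */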
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
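/*
 * s390_enable_sie() needs regular page tables that can carry pgstes, so
 * existing transparent huge page mappings are split and THP is disabled
 * for the mm (VM_NOHUGEPAGE on every vma and in mm->def_flags).
 */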
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
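
/*
 * Walk the address space and replace every page table that does not yet
 * carry pgstes with a pgste-capable one: the old ptes are copied into the
 * new table, the pmd entry is repointed, and the old table is freed via
 * RCU because concurrent walkers may still reference it.
 */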
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);

			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);

			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);

			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}
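
/*
 * Deposit/withdraw a spare page table for a huge pmd. The deposited
 * tables are kept on a list headed by pmd_huge_pte(mm, pmdp); the list
 * node is stored in the deposited page table itself.
 */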
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */