pgtable.c
/*
 *  Copyright IBM Corp. 2007,2009
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
struct rcu_table_freelist {
        struct rcu_head rcu;
        struct mm_struct *mm;
        unsigned int pgt_index;
        unsigned int crst_index;
        unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
         / sizeof(unsigned long))

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);
static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
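
/*
 * Get the per-cpu rcu freelist batch, allocating a fresh page for it on
 * first use. Page tables are queued at the front of the table[] array
 * (pgt_index counts up from 0), crst tables at the back (crst_index
 * counts down from RCU_FREELIST_SIZE); the batch is full when the two
 * indices meet.
 */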
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
        struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
        struct rcu_table_freelist *batch = *batchp;

        if (batch)
                return batch;
        batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
        if (batch) {
                batch->mm = mm;
                batch->pgt_index = 0;
                batch->crst_index = RCU_FREELIST_SIZE;
                *batchp = batch;
        }
        return batch;
}

static void rcu_table_freelist_callback(struct rcu_head *head)
{
        struct rcu_table_freelist *batch =
                container_of(head, struct rcu_table_freelist, rcu);

        while (batch->pgt_index > 0)
                __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
        while (batch->crst_index < RCU_FREELIST_SIZE)
                __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
        free_page((unsigned long) batch);
}

void rcu_table_freelist_finish(void)
{
        struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

        if (!batch)
                return;
        call_rcu(&batch->rcu, rcu_table_freelist_callback);
        __get_cpu_var(rcu_table_freelist) = NULL;
}
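
/*
 * Empty smp call handler; the IPI itself is the point. Waiting for the
 * call to complete on all other cpus ensures that none of them is still
 * inside whatever it was executing when the call was made, so a table
 * can then be freed without an rcu grace period.
 */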
static void smp_sync(void *arg)
{
}
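
/*
 * 31-bit: a crst table is 2 pages, a 4K page holds four 1K page tables.
 * 64-bit: a crst table is 4 pages, a 4K page holds two 2K page tables.
 * FRAG_MASK covers the page->flags bits that track which fragments of a
 * page are in use; SECOND_HALVES covers the bits that mark the second
 * halves used for shadow (noexec) tables or pgstes.
 */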
#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#define TABLES_PER_PAGE 4
#define FRAG_MASK       15UL
#define SECOND_HALVES   10UL

void clear_table_pgstes(unsigned long *table)
{
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
        memset(table + 256, 0, PAGE_SIZE/4);
        clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
        memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER     2
#define TABLES_PER_PAGE 2
#define FRAG_MASK       3UL
#define SECOND_HALVES   2UL

void clear_table_pgstes(unsigned long *table)
{
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        memset(table + 256, 0, PAGE_SIZE/2);
}

#endif
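
/*
 * The start of the vmalloc area is computed backwards from its end;
 * the size can be overridden with the "vmalloc=<size>" kernel parameter.
 */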
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);
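
/*
 * Allocate a region/segment (crst) table and put it on the mm's
 * crst_list. With noexec a second, shadow table is allocated as well;
 * the physical address of the shadow is kept in page->index of the
 * primary table's struct page.
 */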
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        page->index = 0;
        if (noexec) {
                struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
                if (!shadow) {
                        __free_pages(page, ALLOC_ORDER);
                        return NULL;
                }
                page->index = page_to_phys(shadow);
        }
        spin_lock_bh(&mm->context.list_lock);
        list_add(&page->lru, &mm->context.crst_list);
        spin_unlock_bh(&mm->context.list_lock);
        return (unsigned long *) page_to_phys(page);
}

static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        unsigned long *shadow = get_shadow_table(table);

        if (shadow)
                free_pages((unsigned long) shadow, ALLOC_ORDER);
        free_pages((unsigned long) table, ALLOC_ORDER);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page = virt_to_page(table);

        spin_lock_bh(&mm->context.list_lock);
        list_del(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        __crst_table_free(mm, table);
}
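
/*
 * Free a crst table with an rcu grace period. If the mm has a single
 * user and is only active on this cpu, no other cpu can be walking the
 * table and it is freed immediately. Otherwise the table is queued on
 * the per-cpu rcu batch; if no batch page can be allocated, a waited
 * IPI round trip to all other cpus serves as the fallback
 * synchronization before freeing directly.
 */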
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
        struct rcu_table_freelist *batch;
        struct page *page = virt_to_page(table);

        spin_lock_bh(&mm->context.list_lock);
        list_del(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        if (atomic_read(&mm->mm_users) < 2 &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                __crst_table_free(mm, table);
                return;
        }
        batch = rcu_table_freelist_get(mm);
        if (!batch) {
                smp_call_function(smp_sync, NULL, 1);
                __crst_table_free(mm, table);
                return;
        }
        batch->table[--batch->crst_index] = table;
        if (batch->pgt_index >= batch->crst_index)
                rcu_table_freelist_finish();
}

#ifdef CONFIG_64BIT
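/*
 * Grow the address space of an mm by adding region tables on top of the
 * current page table tree: 2 GB (segment table only) -> 4 TB
 * (region-third table) -> 8 PB (region-second table). A racing upgrade
 * is handled by re-checking asce_limit under mm->page_table_lock and
 * retrying.
 */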
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm, mm->context.noexec);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}
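
/*
 * Shrink the address space again by popping the topmost region tables
 * off the page table tree until asce_limit fits the requested limit.
 */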
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}

#endif

/*
 * page table entry allocation/free routines.
 */
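/*
 * A 4K page holds TABLES_PER_PAGE page tables. The low bits of
 * page->flags record which fragments are in use: one bit per table,
 * or two consecutive bits when a second half is needed for the shadow
 * table (noexec) or for pgstes (KVM guests).
 */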
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        struct page *page;
        unsigned long *table;
        unsigned long bits;

        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
        spin_lock_bh(&mm->context.list_lock);
        page = NULL;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
                        page = NULL;
        }
        if (!page) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                page->flags &= ~FRAG_MASK;
                table = (unsigned long *) page_to_phys(page);
                if (mm->context.has_pgste)
                        clear_table_pgstes(table);
                else
                        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        }
        table = (unsigned long *) page_to_phys(page);
        while (page->flags & bits) {
                table += 256;
                bits <<= 1;
        }
        page->flags |= bits;
        if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
                list_move_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned long bits;

        bits = ((unsigned long) table) & 15;
        table = (unsigned long *)(((unsigned long) table) ^ bits);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        page->flags ^= bits;
        if (!(page->flags & FRAG_MASK)) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
}
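
/*
 * Recompute the fragment bits of a table from its offset within the
 * page and release the fragment. Example (64-bit, non-pgste): a table
 * at page offset 0x800 yields bits = 1UL << (0x800 / 256 / 8) = 2UL,
 * i.e. the second of the two fragment bits.
 */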
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned long bits;

        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        page->flags ^= bits;
        if (page->flags & FRAG_MASK) {
                /* Page now has some free pgtable fragments. */
                list_move(&page->lru, &mm->context.pgtable_list);
                page = NULL;
        } else
                /* All fragments of the 4K page have been freed. */
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        if (page) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
}
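
/*
 * Rcu variant of page_table_free. The fragment bits are stashed in the
 * low bits of the table pointer before it is queued on the rcu batch;
 * __page_table_free recovers them after the grace period.
 */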
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
        struct rcu_table_freelist *batch;
        struct page *page;
        unsigned long bits;

        if (atomic_read(&mm->mm_users) < 2 &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                page_table_free(mm, table);
                return;
        }
        batch = rcu_table_freelist_get(mm);
        if (!batch) {
                smp_call_function(smp_sync, NULL, 1);
                page_table_free(mm, table);
                return;
        }
        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        /* Delayed freeing with rcu prevents reuse of pgtable fragments */
        list_del_init(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *)(((unsigned long) table) | bits);
        batch->table[batch->pgt_index++] = table;
        if (batch->pgt_index >= batch->crst_index)
                rcu_table_freelist_finish();
}
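
/*
 * Drop the shadow tables of an mm: free the shadow region/segment
 * tables and release the second halves of the page table fragments,
 * then switch the mm back to non-noexec operation.
 */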
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
        struct page *page;

        spin_lock_bh(&mm->context.list_lock);
        /* Free shadow region and segment tables. */
        list_for_each_entry(page, &mm->context.crst_list, lru)
                if (page->index) {
                        free_pages((unsigned long) page->index, ALLOC_ORDER);
                        page->index = 0;
                }
        /* "Free" second halves of page tables. */
        list_for_each_entry(page, &mm->context.pgtable_list, lru)
                page->flags &= ~SECOND_HALVES;
        spin_unlock_bh(&mm->context.list_lock);
        mm->context.noexec = 0;
        update_mm(mm, tsk);
}

/*
 * Switch on pgstes for the userspace process of the caller (for KVM).
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have switched amode? If no, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done. */
        if (tsk->mm->context.has_pgste)
                return 0;

        /* Let's check if we are allowed to replace the mm. */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* We copy the mm and let dup_mm create the page tables with pgstes. */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened. */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* Ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
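/*
 * Test whether a kernel page is currently mapped, using the "load real
 * address" instruction: lra sets condition code 0 if the translation
 * exists and a non-zero condition code if it does not.
 */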
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */