/*
 * arch/s390/mm/pgtable.c
 *
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
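
/*
 * A per-cpu batch of tables queued for RCU freeing. Page table
 * fragments are filled in from index 0 upwards (pgt_index), crst
 * tables from the end of the array downwards (crst_index); the
 * batch is full once the two indices meet.
 */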
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);
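
/*
 * Return the current cpu's pending batch, allocating a fresh page
 * for it with GFP_ATOMIC if none exists. May return NULL if the
 * allocation fails; callers must handle that case.
 */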
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}
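
/*
 * RCU callback: actually free everything queued in the batch, then
 * the batch page itself. Runs after a grace period, so no cpu can
 * still be walking the old tables.
 */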
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}
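
/*
 * Hand the current cpu's pending batch to RCU and clear the per-cpu
 * pointer, so that a new batch gets allocated on the next free.
 */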
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}
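
/*
 * Empty function, used with smp_call_function() purely as a
 * synchronization point when no batch could be allocated: once the
 * call returns, every other cpu has passed through an interruptible
 * state and cannot hold a stale reference to the table any more.
 */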
static void smp_sync(void *arg)
{
}
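
/*
 * A 4K page holds four 1K page tables on 31 bit and two 2K page
 * tables on 64 bit. For pgste-enabled mms each page table is paired
 * with a same-sized pgste area, so an allocation occupies two
 * fragments instead of one. FRAG_MASK covers the per-fragment
 * allocation bits kept in page->flags.
 */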
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}
#else /* CONFIG_64BIT */
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}
#endif /* CONFIG_64BIT */

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
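
/*
 * Allocate a full region/segment (crst) table of 2048 entries:
 * 16K (order 2) on 64 bit, 8K (order 1) on 31 bit.
 */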
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
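
/*
 * Free a crst table, delaying the actual free with RCU while other
 * cpus may still walk it. If this mm has a single user and has only
 * ever run on the current cpu the table can be freed immediately.
 * If no batch is available an smp_call_function() round trip serves
 * as the synchronization instead.
 */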
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

#ifdef CONFIG_64BIT
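/*
 * Grow the user address space by putting new region tables on top
 * of the current top-level table until the address space limit
 * covers 'limit' (at most 1UL << 53). The hardware asce is switched
 * to the new top table via update_mm() once it is in place.
 */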
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
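
/*
 * Shrink the user address space by removing now-unneeded top-level
 * region tables until the address space limit is down to 'limit'.
 * The TLB is flushed up front because translations through the old
 * tables must not survive the downgrade.
 */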
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif /* CONFIG_64BIT */

/*
 * page table entry allocation/free routines.
 */
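
/*
 * Page tables are sub-allocated as fragments of a 4K page. The low
 * bits of page->flags track which fragments of a page on the mm's
 * pgtable_list are in use; fully used pages are moved to the tail
 * of the list. Pgste-enabled mms take two adjacent fragments at a
 * time (the page table and its pgste half), hence bits = 3 instead
 * of 1.
 */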
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
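
/*
 * Free a page table fragment whose allocation bits were stashed in
 * the low bits of the table pointer by page_table_free_rcu().
 * Called from the RCU callback, after the grace period has passed.
 */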
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
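
/*
 * Immediately free a page table fragment; the page itself goes back
 * to the page allocator once its last fragment has been released.
 */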
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
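
/*
 * Free a page table fragment with RCU delay, analogous to
 * crst_table_free_rcu(). The fragment's allocation bits are encoded
 * in the low bits of the pointer for __page_table_free() to undo.
 */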
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

/*
 * Switch on pgstes for the userspace process of the current task
 * (needed by KVM to run guests through sie).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
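/*
 * Check whether a kernel page is currently mapped. The lra (load
 * real address) instruction sets condition code 0 if the address
 * can be translated and a non-zero condition code otherwise.
 */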
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC && CONFIG_HIBERNATION */