/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL
#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL
#endif
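
/*
 * Page tables are smaller than a page: TABLES_PER_PAGE 256-entry
 * tables share one 4K page.  The low bits of page->flags (FRAG_MASK)
 * track which fragments of the page are in use.  With the no-exec
 * emulation enabled, each table is paired with a shadow fragment;
 * SECOND_HALVES is the mask of the fragments that serve as shadows.
 */

/*
 * Allocate a region/segment (CRST) table and put it on the mm's
 * crst_list.  For no-exec mms a shadow table is allocated as well;
 * its physical address is stored in page->index.
 */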
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->page_table_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->page_table_lock);
	return (unsigned long *) page_to_phys(page);
}
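
/*
 * Remove a CRST table from the mm's crst_list and free it, together
 * with its shadow table if one was allocated.
 */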
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);
	struct page *page = virt_to_page(table);

	spin_lock(&mm->page_table_lock);
	list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
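
/*
 * Hand out a 256-entry page table fragment.  The first clear bit in
 * page->flags of the page at the head of pgtable_list selects a free
 * fragment at a 256 * sizeof(unsigned long) byte offset into the page;
 * for no-exec mms two adjacent fragments (table plus shadow) are
 * claimed at once.  Fully used pages are moved to the tail of the list
 * so the head always has a free fragment, if any page does.
 */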
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	spin_lock(&mm->page_table_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock(&mm->page_table_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->page_table_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->page_table_lock);
	return table;
}
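
/*
 * Return a page table fragment to its page.  The fragment's bit(s)
 * are cleared in page->flags; once no fragment of the page is in use
 * any more, the page itself is released.
 */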
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
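
/*
 * Drop the no-exec emulation for an mm: free all shadow CRST tables
 * and clear the SECOND_HALVES bits so the shadow page table fragments
 * become available for normal allocation again, then reload the
 * address space parameters with update_mm().
 */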
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->page_table_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}