/* mmu.c */

/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;
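
/*
 * init_stub_pte - map the kernel page at 'kernel' into the process
 * address space at virtual address 'proc', present and read-only.
 * Used below for the skas0 stub code and stub data pages.
 */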
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/*
	 * There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context.
	 */
	mm->context.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif
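
	/* Install the mapping: point the pte at the kernel page, read-only. */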
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;
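
	/*
	 * Error unwind: free in reverse order of allocation.  The labels
	 * deliberately fall through, so a pte failure frees the pmd and
	 * then the pud.
	 */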
 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return -ENOMEM;
}
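
/*
 * init_new_context - set up the kernel and host side of a new address
 * space.  With proc_mm the host supplies an mm file descriptor via
 * new_mm(); otherwise the address space lives in a child process,
 * either copied from the parent's context or started from scratch.
 */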
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;

		/*
		 * This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if (ret)
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if (ret)
			goto out_free;
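
		/*
		 * The stub pte page lives above TASK_SIZE, so exit_mmap
		 * never frees it (see the comment in init_stub_pte); keep
		 * it out of the mm's page table accounting here.
		 */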
		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
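
	/*
	 * If the current process has a real user mm, remember it so the
	 * new host context and LDT can be copied from the parent below.
	 */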
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk(KERN_ERR "init_new_context - "
			       "new_mm failed, errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else
			to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context - init_new_ldt "
		       "failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}
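
/*
 * destroy_context - tear down what init_new_context set up: close the
 * /proc/mm descriptor or kill the host process holding the address
 * space, then release the stub stack and the page table pages that
 * were deliberately leaked past exit_mmap.
 */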
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

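	/*
	 * This is assumed to match the skas_needs_stub condition in
	 * init_new_context; only then were the stub stack page and the
	 * extra page table pages allocated.
	 */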
	if (!proc_mm || !ptrace_faultinfo) {
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table),
				    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}