syscall32.c

/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
/* vsyscall handling for 32bit processes. Map a stub page into it
   on demand because 32bit cannot reach the kernel's fixmaps */

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/stringify.h>
#include <linux/security.h>

#include <asm/proto.h>
#include <asm/tlbflush.h>
#include <asm/ia32_unistd.h>

extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;

char *syscall32_page;
static int use_sysenter = -1;
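
/*
 * Fault handler for the vsyscall stub VMA.  Every fault in the VMA is
 * backed by the single shared syscall32_page: translate the faulting
 * address back into that page, take a reference and return it.  The
 * optional fault-type out-parameter is left untouched.
 */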
static struct page *
syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
{
	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
	get_page(p);
	return p;
}

/* Prevent VMA merging */
static void syscall32_vma_close(struct vm_area_struct *vma)
{
}

static struct vm_operations_struct syscall32_vm_ops = {
	.close = syscall32_vma_close,
	.nopage = syscall32_nopage,
};

struct linux_binprm;

/* Setup a VMA at program startup for the vsyscall page */
int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
{
	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int ret;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma)
		return -ENOMEM;
	memset(vma, 0, sizeof(struct vm_area_struct));

	/* Could randomize here */
	vma->vm_start = VSYSCALL32_BASE;
	vma->vm_end = VSYSCALL32_END;
	/* MAYWRITE to allow gdb to COW and set breakpoints */
	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	vma->vm_flags |= mm->def_flags;
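	/*
	 * The low three vm_flags bits (VM_READ|VM_WRITE|VM_EXEC) index the
	 * protection map for private mappings; with VM_WRITE clear the stub
	 * page is mapped read-only, and gdb's breakpoint writes go through
	 * COW.
	 */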
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &syscall32_vm_ops;
	vma->vm_mm = mm;

	down_write(&mm->mmap_sem);
	if ((ret = insert_vm_struct(mm, vma))) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return ret;
	}
	mm->total_vm += npages;
	up_write(&mm->mmap_sem);
	return 0;
}
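
/*
 * Boot-time setup: allocate the shared stub page and copy in the
 * SYSENTER or SYSCALL entry stub, depending on what syscall32_cpu_init()
 * detected on the boot CPU.
 */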
static int __init init_syscall32(void)
{
	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!syscall32_page)
		panic("Cannot allocate syscall32 page");
	if (use_sysenter > 0) {
		memcpy(syscall32_page, syscall32_sysenter,
		       syscall32_sysenter_end - syscall32_sysenter);
	} else {
		memcpy(syscall32_page, syscall32_syscall,
		       syscall32_syscall_end - syscall32_syscall);
	}
	return 0;
}

__initcall(init_syscall32);

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
	if (use_sysenter < 0)
		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);

	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
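	/*
	 * MSR_CSTAR is the entry point for SYSCALL issued from
	 * compatibility (32-bit) mode; the checking_wrmsrl() writes above
	 * use the fault-tolerant variant because the SYSENTER MSRs may not
	 * exist on every CPU.
	 */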
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}