syscall32.c

/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
/* vsyscall handling for 32bit processes. Map a stub page into it
   on demand because 32bit cannot reach the kernel's fixmaps */

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/stringify.h>
#include <linux/security.h>
#include <asm/proto.h>
#include <asm/tlbflush.h>
#include <asm/ia32_unistd.h>
#include <asm/vsyscall32.h>

extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;

static struct page *syscall32_pages[1];
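/*
 * -1 = not yet determined; syscall32_cpu_init() sets this from the CPU
 * vendor.  A positive value makes init_syscall32() copy the SYSENTER stub
 * into the vsyscall page, otherwise the SYSCALL stub is used.
 */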
static int use_sysenter = -1;

struct linux_binprm;
/* Set up a VMA at program startup for the vsyscall page */
int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 *
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem debugging fully
	 * interpretable later without matching up the same kernel and
	 * hardware config to see what PC values meant.
	 */
	/* Could randomize here */
	ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      syscall32_pages);
	up_write(&mm->mmap_sem);
	return ret;
}
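
/* Used by /proc/<pid>/maps to name the fixed 32-bit vsyscall mapping "[vdso]" */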
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_start == VSYSCALL32_BASE &&
	    vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
		return "[vdso]";
	return NULL;
}
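
/*
 * Boot-time initialisation: allocate the shared vsyscall stub page and copy
 * in the SYSENTER or SYSCALL entry stub selected by syscall32_cpu_init().
 */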
static int __init init_syscall32(void)
{
	char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);

	if (!syscall32_page)
		panic("Cannot allocate syscall32 page");
	syscall32_pages[0] = virt_to_page(syscall32_page);
	if (use_sysenter > 0) {
		memcpy(syscall32_page, syscall32_sysenter,
		       syscall32_sysenter_end - syscall32_sysenter);
	} else {
		memcpy(syscall32_page, syscall32_syscall,
		       syscall32_syscall_end - syscall32_syscall);
	}
	return 0;
}

__initcall(init_syscall32);

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
	if (use_sysenter < 0)
		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);

	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
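
	/* SYSCALL from compat mode enters the kernel through ia32_cstar_target */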
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}