/* Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/* Standard copy_to_user with segment limit checking */
        .globl copy_to_user
        .p2align 4
copy_to_user:
        GET_THREAD_INFO(%rax)
        movq %rdi,%rcx
        addq %rdx,%rcx                  /* rcx = destination + count */
        jc   bad_to_user                /* carry set: the range wraps around */
        cmpq threadinfo_addr_limit(%rax),%rcx
        jae  bad_to_user                /* end of range is above addr_limit */
        jmp  copy_user_generic
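
/*
 * Caller-side sketch (added for illustration, not part of the original
 * file; the C prototype shown is the usual uaccess convention and is an
 * assumption, not taken from this listing):
 *
 *      unsigned long copy_to_user(void *to, const void *from, unsigned long n);
 *
 *      left = copy_to_user(ubuf, kbuf, len);   -- returns bytes NOT copied
 *      if (left)
 *              return -EFAULT;                 -- 0 means everything was copied
 */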
/* Standard copy_from_user with segment limit checking */
        .globl copy_from_user
        .p2align 4
copy_from_user:
        GET_THREAD_INFO(%rax)
        movq %rsi,%rcx
        addq %rdx,%rcx                  /* rcx = source + count */
        jc   bad_from_user              /* carry set: the range wraps around */
        cmpq threadinfo_addr_limit(%rax),%rcx
        jae  bad_from_user              /* end of range is above addr_limit */
        /* FALL THROUGH to copy_user_generic */
        .section .fixup,"ax"
        /* must zero dest */
bad_from_user:
        movl %edx,%ecx                  /* nothing was copied: clear all count bytes */
        xorl %eax,%eax
        rep
        stosb                           /* zero the destination buffer */
bad_to_user:
        movl %edx,%eax                  /* report the whole count as uncopied */
        ret
        .previous
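
/*
 * Note on the zeroing above (added for illustration): this error path is
 * reached before anything has been copied, so rdi and rdx still hold the
 * original destination and count and the whole kernel buffer is cleared.
 * A caller that is careless with the return value therefore sees zeroes
 * rather than whatever stale data the buffer held, e.g. (hypothetical
 * caller, not from this file):
 *
 *      struct foo karg;
 *      n = copy_from_user(&karg, uarg, sizeof(karg));
 *      -- if uarg was a bad pointer, karg is now all zeroes, not stale memory
 */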
/*
 * copy_user_generic - memory copy with exception handling.
 *
 * Input:
 *      rdi destination
 *      rsi source
 *      rdx count
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, keep those errata in mind.
 *
 * Output:
 *      eax uncopied bytes or 0 if successful.
 *
 * (A caller-side usage sketch appears at the end of this file.)
 */
        .globl copy_user_generic
copy_user_generic:
        movl %edx,%ecx
        shrl $3,%ecx                    /* ecx = number of whole quadwords */
        andl $7,%edx                    /* edx = remaining tail bytes */
        jz   5f                         /* count is a multiple of 8 */
1:      rep
        movsq                           /* copy the quadwords */
        movl %edx,%ecx
        xorl %eax,%eax                  /* return 0 on success */
2:      rep
        movsb                           /* copy the tail bytes */
        ret
        /* align here? */
5:      xorl %eax,%eax
6:      rep movsq
        ret
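
/*
 * Worked example of the split above (added for illustration): for a count
 * of 20 bytes, ecx = 20 >> 3 = 2 quadwords and edx = 20 & 7 = 4 tail bytes,
 * so rep movsq moves 16 bytes and rep movsb moves the remaining 4.  A count
 * that is a multiple of 8 takes the 5/6 path and uses rep movsq only.
 */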
        .section .fixup,"ax"
        /* fault in 1: rcx quadwords plus rdx tail bytes are still uncopied */
3:      lea (%rdx,%rcx,8),%rax
        ret
        /* fault in 2: rcx bytes are still uncopied */
4:      movl %ecx,%eax
        ret
        .previous
        .section __ex_table,"a"
        .quad 1b,3b
        .quad 2b,4b
        .quad 6b,3b                     /* rdx is 0 on the 5/6 path, so 3b returns rcx*8 uncopied bytes */
        .previous
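
/*
 * Usage sketch for copy_user_generic (added for illustration; the C
 * prototype is an assumption derived from the register contract documented
 * above, not taken from this listing):
 *
 *      unsigned long copy_user_generic(void *dst, const void *src,
 *                                      unsigned long count);
 *
 *      left = copy_user_generic(dst, src, count);
 *      -- left == 0: the whole count was copied
 *      -- left  > 0: a fault was taken; the exception table redirected the
 *                    faulting rep instruction to a fixup label, which
 *                    computed the bytes still uncopied and returned them.
 */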