uaccess.h

#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
 * These routines enable/disable the pagefault handler: while pagefaults
 * are disabled, the handler will not take any locks and will go straight
 * to the fixup table.
 *
 * They closely resemble the preempt_disable/enable calls and in fact they
 * are identical; this is because there is currently no other way to make
 * the pagefault handlers behave this way.  So we do disable preemption,
 * even though we don't necessarily care about that side effect.
 */
static inline void pagefault_disable(void)
{
	preempt_count_inc();
	/*
	 * Make sure the store to the preempt count has been issued
	 * before a pagefault can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure the last loads/stores have been issued before the
	 * pagefault handler is enabled again.
	 */
	barrier();
	preempt_count_dec();
	preempt_check_resched();
}
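
/*
 * Illustrative sketch (not part of the upstream header): a caller that
 * must not sleep can bracket a user-space access with pagefault_disable()/
 * pagefault_enable().  The copy then either succeeds immediately or fails
 * via the fixup table.  The helper name and the "let the caller retry"
 * strategy are hypothetical.
 */
static inline unsigned long example_copy_user_atomic(void *dst,
				const void __user *src, unsigned long len)
{
	unsigned long uncopied;

	pagefault_disable();
	/* Returns the number of bytes that could NOT be copied. */
	uncopied = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	/*
	 * Non-zero means the access faulted; a caller that is allowed
	 * to sleep could retry with a plain copy_from_user().
	 */
	return uncopied;
}
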
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */
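
/*
 * Illustrative sketch (not part of the upstream header): the _nocache
 * variants are intended for bulk copies whose destination will not be
 * read again soon, so an architecture that defines
 * ARCH_HAS_NOCACHE_UACCESS may bypass the CPU cache; everywhere else
 * they fall back to the plain copies above.  The helper name is
 * hypothetical.
 */
static inline unsigned long example_fill_streaming_buffer(void *dst,
				const void __user *src, unsigned long len)
{
	/*
	 * May fault and sleep; combine the _inatomic_nocache variant
	 * with pagefault_disable() when sleeping is not allowed.
	 */
	return __copy_from_user_nocache(dst, src, len);
}
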
/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval)*
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel fault
 * happens, handle that and return -EFAULT.
 * We ensure that the access is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 * This must be a macro because it needs to know the type of @retval.
 *
 * The macro switches the address limit to KERNEL_DS for the duration of
 * the access and restores it afterwards, so the caller does not need to
 * call set_fs() itself.
 */
#define probe_kernel_address(addr, retval)		\
	({						\
		long ret;				\
		mm_segment_t old_fs = get_fs();		\
							\
		set_fs(KERNEL_DS);			\
		pagefault_disable();			\
		ret = __copy_from_user_inatomic(&(retval),		\
				(__force typeof(retval) __user *)(addr),	\
				sizeof(retval));	\
		pagefault_enable();			\
		set_fs(old_fs);				\
		ret;					\
	})
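
/*
 * Illustrative sketch (not part of the upstream header): read one word
 * from a possibly bogus kernel pointer without risking an oops, e.g.
 * when dumping state from an exception path.  The helper name is
 * hypothetical.
 */
static inline int example_peek_kernel_word(const unsigned long *addr,
				unsigned long *val)
{
	unsigned long word;

	if (probe_kernel_address(addr, word))
		return -EFAULT;		/* fault: address not readable */

	*val = word;
	return 0;
}
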
/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
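
/*
 * Illustrative sketch (not part of the upstream header): copy as many
 * bytes as are actually readable, stopping at the first unmapped byte
 * instead of oopsing - handy for debugging dumps.  The helper name is
 * hypothetical.
 */
static inline size_t example_copy_readable(void *dst, const void *src,
				size_t len)
{
	size_t done;

	for (done = 0; done < len; done++) {
		/* probe_kernel_read() returns 0 on success. */
		if (probe_kernel_read((char *)dst + done,
				      (const char *)src + done, 1))
			break;
	}
	return done;			/* number of bytes copied */
}
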
/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
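
/*
 * Illustrative sketch (not part of the upstream header): write one word
 * to a kernel address that may be unmapped or read-only, in the spirit
 * of what text-patching code does.  The helper name is hypothetical, and
 * real text patching needs more than this (cache maintenance,
 * synchronization against other CPUs).
 */
static inline long example_poke_kernel_word(void *addr, unsigned long val)
{
	/* Returns 0 on success, -EFAULT if @addr could not be written. */
	return probe_kernel_write(addr, &val, sizeof(val));
}
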
#endif		/* __LINUX_UACCESS_H__ */