uaccess.h

#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
 * These routines enable/disable the pagefault handler: while it is
 * disabled, the handler will not take any locks and will go straight
 * to the fixup table.
 *
 * They closely resemble the preempt_disable/enable calls and in fact
 * they are identical; this is because there is currently no other way
 * to make the pagefault handlers behave this way.  So we do disable
 * preemption, but that is a side effect rather than the goal.
 */
static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * Make sure the preempt count store has been issued before a
	 * pagefault can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure any pending loads/stores are issued before the
	 * pagefault handler is enabled again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * Make sure the decrement is visible before we check for a
	 * pending reschedule.
	 */
	barrier();
	preempt_check_resched();
}
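
/*
 * Example (not part of the original header): a minimal sketch of how a
 * caller might pair pagefault_disable()/pagefault_enable() around an
 * atomic user-space access.  The helper name is hypothetical and
 * <linux/errno.h> is assumed for -EFAULT; with pagefaults disabled,
 * __copy_from_user_inatomic() returns the number of bytes it could not
 * copy instead of sleeping on a fault.
 */
static inline int example_peek_user_word(unsigned long *dst,
					 const unsigned long __user *src)
{
	unsigned long missed;

	pagefault_disable();
	missed = __copy_from_user_inatomic(dst, src, sizeof(*dst));
	pagefault_enable();

	return missed ? -EFAULT : 0;
}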
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */
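
/*
 * Example (not part of the original header): a hypothetical helper showing
 * the intended use of the _nocache variant - bulk copies whose destination
 * will not be read again soon, so polluting the CPU cache is best avoided.
 * On architectures without ARCH_HAS_NOCACHE_UACCESS it simply falls back
 * to __copy_from_user() via the stub above.
 */
static inline unsigned long example_bulk_copy_from_user(void *dst,
				const void __user *src, unsigned long len)
{
	/* Returns the number of bytes that could not be copied. */
	return __copy_from_user_nocache(dst, src, len);
}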
/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval) *
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel fault
 * happens, handle that and return -EFAULT.
 * We ensure that the __get_user() is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 */
#define probe_kernel_address(addr, retval)		\
	({						\
		long ret;				\
							\
		pagefault_disable();			\
		ret = __get_user(retval, addr);		\
		pagefault_enable();			\
		ret;					\
	})
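
/*
 * Example (not part of the original header): a minimal sketch, assuming a
 * hypothetical caller that wants to peek at a kernel address which may be
 * unmapped (e.g. while already holding mmap_sem).  On a fault the word is
 * left untouched and the -EFAULT from __get_user() is passed back.
 */
static inline long example_peek_kernel_word(unsigned long *dst,
					    const unsigned long *src)
{
	/* *dst receives the word at src; a fault yields a non-zero return. */
	return probe_kernel_address(src, *dst);
}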
#endif	/* __LINUX_UACCESS_H__ */