uaccess.c

/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/err.h"
#include "linux/highmem.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "os.h"
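
/*
 * Walk the given task's page tables and translate a virtual address
 * into the physical address backing it; any missing level makes the
 * lookup fail with ERR_PTR(-EINVAL).  If pte_out is non-NULL, the PTE
 * is copied back so the caller can inspect its protection bits.
 */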
static void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
                             pte_t *pte_out)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t ptent;

        if (task->mm == NULL)
                return ERR_PTR(-EINVAL);
        pgd = pgd_offset(task->mm, addr);
        if (!pgd_present(*pgd))
                return ERR_PTR(-EINVAL);

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return ERR_PTR(-EINVAL);

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return ERR_PTR(-EINVAL);

        pte = pte_offset_kernel(pmd, addr);
        ptent = *pte;
        if (!pte_present(ptent))
                return ERR_PTR(-EINVAL);

        if (pte_out != NULL)
                *pte_out = ptent;
        return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
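
/*
 * Resolve virt to a physical address, faulting the page in first (and,
 * for writes, making it writable) when the lookup fails or the PTE is
 * read-only.  Returns -1UL if the address cannot be mapped.
 */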
static unsigned long maybe_map(unsigned long virt, int is_write)
{
        pte_t pte;
        int err;

        void *phys = um_virt_to_phys(current, virt, &pte);
        int dummy_code;

        if (IS_ERR(phys) || (is_write && !pte_write(pte))) {
                err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
                if (err)
                        return -1UL;
                phys = um_virt_to_phys(current, virt, NULL);
        }
        if (IS_ERR(phys))
                phys = (void *) -1;

        return (unsigned long) phys;
}
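
/*
 * Apply op to a chunk that lies entirely within one page: resolve the
 * page, run the callback on it through a temporary atomic kernel
 * mapping, then unmap.
 */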
static int do_op_one_page(unsigned long addr, int len, int is_write,
                int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
        struct page *page;
        int n;

        addr = maybe_map(addr, is_write);
        if (addr == -1UL)
                return -1;

        page = phys_to_page(addr);
        addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
                (addr & ~PAGE_MASK);

        n = (*op)(addr, len, arg);

        kunmap_atomic(page, KM_UML_USERCOPY);

        return n;
}
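
/*
 * Walk an arbitrary user buffer page by page, applying op to each
 * chunk (the partial first page, whole middle pages, then the partial
 * tail).  The arguments arrive through a va_list because this runs
 * under setjmp_wrapper(); fault_catcher lets an unexpected fault
 * longjmp back out of the copy.  *res ends up 0 unless the callback
 * reports a fault, in which case it holds the number of bytes left.
 */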
static void do_buffer_op(void *jmpbuf, void *arg_ptr)
{
        va_list args;
        unsigned long addr;
        int len, is_write, size, remain, n;
        int (*op)(unsigned long, int, void *);
        void *arg;
        int *res;

        va_copy(args, *(va_list *)arg_ptr);
        addr = va_arg(args, unsigned long);
        len = va_arg(args, int);
        is_write = va_arg(args, int);
        op = va_arg(args, void *);
        arg = va_arg(args, void *);
        res = va_arg(args, int *);
        va_end(args);

        size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
        remain = len;

        current->thread.fault_catcher = jmpbuf;
        n = do_op_one_page(addr, size, is_write, op, arg);
        if (n != 0) {
                *res = (n < 0 ? remain : 0);
                goto out;
        }

        addr += size;
        remain -= size;
        if (remain == 0) {
                *res = 0;
                goto out;
        }

        while (addr < ((addr + remain) & PAGE_MASK)) {
                n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
                if (n != 0) {
                        *res = (n < 0 ? remain : 0);
                        goto out;
                }

                addr += PAGE_SIZE;
                remain -= PAGE_SIZE;
        }
        if (remain == 0) {
                *res = 0;
                goto out;
        }

        n = do_op_one_page(addr, remain, is_write, op, arg);
        if (n != 0)
                *res = (n < 0 ? remain : 0);
        else *res = 0;
out:
        current->thread.fault_catcher = NULL;
}
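
/*
 * Run do_buffer_op() under setjmp_wrapper() so that a fault taken
 * while touching the buffer unwinds back here.  Returns 0 on success,
 * or the number of bytes that could not be processed.
 */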
static int buffer_op(unsigned long addr, int len, int is_write,
                     int (*op)(unsigned long addr, int len, void *arg),
                     void *arg)
{
        int faulted, res;

        faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
                                 &res);
        if (!faulted)
                return res;

        return addr + len - (unsigned long) current->thread.fault_addr;
}
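
/*
 * copy_from_user() copies n bytes from user space into a kernel
 * buffer one page-sized chunk at a time; the per-chunk callback
 * advances the destination pointer as it goes.  Returns the number of
 * bytes that could not be copied (0 on success).
 */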
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
        unsigned long *to_ptr = arg, to = *to_ptr;

        memcpy((void *) to, (void *) from, len);
        *to_ptr += len;
        return 0;
}

int copy_from_user(void *to, const void __user *from, int n)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (__force void *) from, n);
                return 0;
        }

        return access_ok(VERIFY_READ, from, n) ?
               buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to) :
               n;
}
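
/*
 * copy_to_user() is the mirror image: the chunk address handed to the
 * callback is the user destination, and arg tracks the kernel source
 * pointer.
 */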
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
        unsigned long *from_ptr = arg, from = *from_ptr;

        memcpy((void *) to, (void *) from, len);
        *from_ptr += len;
        return 0;
}

int copy_to_user(void __user *to, const void *from, int n)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((__force void *) to, from, n);
                return 0;
        }

        return access_ok(VERIFY_WRITE, to, n) ?
               buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from) :
               n;
}
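
/*
 * strncpy_from_user() copies a NUL-terminated string from user space;
 * the chunk callback returns a positive value once it sees the
 * terminator so that buffer_op() stops early.  Returns the length of
 * the copied string (as strnlen() reports it) or -EFAULT.
 */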
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
        char **to_ptr = arg, *to = *to_ptr;
        int n;

        strncpy(to, (void *) from, len);
        n = strnlen(to, len);
        *to_ptr += n;

        if (n < len)
                return 1;
        return 0;
}

int strncpy_from_user(char *dst, const char __user *src, int count)
{
        int n;
        char *ptr = dst;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                strncpy(dst, (__force void *) src, count);
                return strnlen(dst, count);
        }

        if (!access_ok(VERIFY_READ, src, 1))
                return -EFAULT;

        n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
                      &ptr);
        if (n != 0)
                return -EFAULT;
        return strnlen(dst, count);
}
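
/*
 * clear_user() zeroes a user buffer; __clear_user() is the variant
 * that skips the access_ok() check.  Both return the number of bytes
 * that could not be cleared.
 */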
static int clear_chunk(unsigned long addr, int len, void *unused)
{
        memset((void *) addr, 0, len);
        return 0;
}

int __clear_user(void __user *mem, int len)
{
        return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}

int clear_user(void __user *mem, int len)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((__force void *) mem, 0, len);
                return 0;
        }

        return access_ok(VERIFY_WRITE, mem, len) ?
               buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL) : len;
}
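
/*
 * strnlen_user() measures a user string, including the terminating
 * NUL, up to len bytes.  The chunk callback stops the walk early when
 * it finds the terminator; a fault yields -EFAULT.
 */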
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
        int *len_ptr = arg, n;

        n = strnlen((void *) str, len);
        *len_ptr += n;

        if (n < len)
                return 1;
        return 0;
}

int strnlen_user(const void __user *str, int len)
{
        int count = 0, n;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((__force char *) str, len) + 1;

        n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
        if (n == 0)
                return count + 1;
        return -EFAULT;
}