/* usercopy.c */
  1. /*
  2. * User address space access functions.
  3. * The non inlined parts of asm-i386/uaccess.h are here.
  4. *
  5. * Copyright 1997 Andi Kleen <ak@muc.de>
  6. * Copyright 1997 Linus Torvalds
  7. */
  8. #include <linux/config.h>
  9. #include <linux/mm.h>
  10. #include <linux/highmem.h>
  11. #include <linux/blkdev.h>
  12. #include <linux/module.h>
  13. #include <asm/uaccess.h>
  14. #include <asm/mmx.h>
  15. static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
  16. {
  17. #ifdef CONFIG_X86_INTEL_USERCOPY
  18. if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
  19. return 0;
  20. #endif
  21. return 1;
  22. }
/* Pointer-friendly wrapper: casts both addresses to unsigned long before
 * handing them to __movsl_is_ok(). */
#define movsl_is_ok(a1,a2,n) \
	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
/*
 * Copy a null terminated string from userspace.
 */

/*
 * __do_strncpy_from_user(dst, src, count, res) - asm core for the
 * strncpy_from_user() family.
 *
 * Copies bytes from the user pointer @src to the kernel buffer @dst
 * until either a NUL byte has been copied or @count bytes have been
 * transferred.  The user-space load at label 0: ("lodsb") is covered by
 * an __ex_table entry, so a fault jumps to the fixup at 3:, which stores
 * -EFAULT into @res.  On success @res is set to the number of bytes
 * copied, not counting the trailing NUL ("1: subl %1,%0" computes the
 * original count minus what is left).  __d0..__d2 are dummy outputs that
 * tell gcc eax/esi/edi are clobbered.
 */
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
	int __d0, __d1, __d2; \
	might_sleep(); \
	__asm__ __volatile__( \
		" testl %1,%1\n" \
		" jz 2f\n" \
		"0: lodsb\n" \
		" stosb\n" \
		" testb %%al,%%al\n" \
		" jz 1f\n" \
		" decl %1\n" \
		" jnz 0b\n" \
		"1: subl %1,%0\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: movl %5,%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 0b,3b\n" \
		".previous" \
		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
		  "=&D" (__d2) \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory"); \
} while (0)
  56. /**
  57. * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
  58. * @dst: Destination address, in kernel space. This buffer must be at
  59. * least @count bytes long.
  60. * @src: Source address, in user space.
  61. * @count: Maximum number of bytes to copy, including the trailing NUL.
  62. *
  63. * Copies a NUL-terminated string from userspace to kernel space.
  64. * Caller must check the specified block with access_ok() before calling
  65. * this function.
  66. *
  67. * On success, returns the length of the string (not including the trailing
  68. * NUL).
  69. *
  70. * If access to userspace fails, returns -EFAULT (some data may have been
  71. * copied).
  72. *
  73. * If @count is smaller than the length of the string, copies @count bytes
  74. * and returns @count.
  75. */
  76. long
  77. __strncpy_from_user(char *dst, const char __user *src, long count)
  78. {
  79. long res;
  80. __do_strncpy_from_user(dst, src, count, res);
  81. return res;
  82. }
  83. /**
  84. * strncpy_from_user: - Copy a NUL terminated string from userspace.
  85. * @dst: Destination address, in kernel space. This buffer must be at
  86. * least @count bytes long.
  87. * @src: Source address, in user space.
  88. * @count: Maximum number of bytes to copy, including the trailing NUL.
  89. *
  90. * Copies a NUL-terminated string from userspace to kernel space.
  91. *
  92. * On success, returns the length of the string (not including the trailing
  93. * NUL).
  94. *
  95. * If access to userspace fails, returns -EFAULT (some data may have been
  96. * copied).
  97. *
  98. * If @count is smaller than the length of the string, copies @count bytes
  99. * and returns @count.
  100. */
  101. long
  102. strncpy_from_user(char *dst, const char __user *src, long count)
  103. {
  104. long res = -EFAULT;
  105. if (access_ok(VERIFY_READ, src, 1))
  106. __do_strncpy_from_user(dst, src, count, res);
  107. return res;
  108. }
/*
 * Zero Userspace
 */

/*
 * __do_clear_user(addr, size) - asm core for clear_user()/__clear_user().
 *
 * Zeroes @size bytes at user address @addr: size/4 longs via "rep stosl"
 * (label 0:), then the size&3 leftover bytes via "rep stosb" (label 1:).
 * A fault in the long loop takes the fixup at 3:, which rebuilds the
 * remaining byte count as trailing_bytes + 4*longs_left ("lea"); a fault
 * in the byte loop resumes at 2: with ecx already holding the bytes
 * left.  On exit @size contains the number of bytes NOT cleared (0 on
 * success).
 */
#define __do_clear_user(addr,size) \
do { \
	int __d0; \
	might_sleep(); \
	__asm__ __volatile__( \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: lea 0(%2,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 0b,3b\n" \
		" .long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
  133. /**
  134. * clear_user: - Zero a block of memory in user space.
  135. * @to: Destination address, in user space.
  136. * @n: Number of bytes to zero.
  137. *
  138. * Zero a block of memory in user space.
  139. *
  140. * Returns number of bytes that could not be cleared.
  141. * On success, this will be zero.
  142. */
  143. unsigned long
  144. clear_user(void __user *to, unsigned long n)
  145. {
  146. might_sleep();
  147. if (access_ok(VERIFY_WRITE, to, n))
  148. __do_clear_user(to, n);
  149. return n;
  150. }
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	/* The macro rewrites n in place with the count of bytes left. */
	__do_clear_user(to, n);
	return n;
}
/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	/*
	 * mask is ~0UL when __addr_ok(s) accepts the start address and 0
	 * otherwise; it both clamps the scan length ("andl %0,%%ecx") and
	 * forces the final result to 0 for a bad pointer ("res & mask").
	 */
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;
	might_sleep();
	__asm__ __volatile__(
		/* n == 0: take the fixup at 3:, which yields res = 1. */
		" testl %0, %0\n"
		" jz 3f\n"
		" andl %0,%%ecx\n"
		/* Scan up to ecx bytes for the NUL (al == 0 on entry). */
		"0: repne; scasb\n"
		/* al = 1 if no NUL was found within the limit. */
		" setne %%al\n"
		/* n = bytes consumed by the scan (includes the NUL if hit). */
		" subl %%ecx,%0\n"
		/* res = consumed + (1 if no NUL found), so "too long" > n. */
		" addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		/* Fault while scanning user memory: report 0. */
		"2: xorl %%eax,%%eax\n"
		" jmp 1b\n"
		"3: movb $1,%%al\n"
		" jmp 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,2b\n"
		".previous"
		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
  208. #ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * __copy_user_intel - copy @size bytes from kernel @from to user @to,
 * unrolled for Intel CPUs where misaligned "rep movsl" is slow
 * (selected via movsl_is_ok()).
 *
 * The main loop (1: .. "ja 1b") moves 64 bytes per iteration as paired
 * 32-bit loads/stores; the extra reads at 1:/2: touch the source 32 and
 * 64 bytes ahead of the block being copied.  The sub-64-byte remainder
 * is finished with rep movsl + rep movsb (99:/37:).  Every numbered
 * instruction has an __ex_table entry landing on 100:, except the movsl
 * tail whose fixup at 101: first converts the outstanding long count
 * back into bytes (eax holds the trailing byte count at that point).
 *
 * Returns the number of bytes that could NOT be copied; 0 on success.
 */
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		/* Advance both pointers by 64 and loop while >= 64 remain. */
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		/* Tail: size/4 longs via movsl, then size&3 bytes via movsb. */
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		/* Fault in movsl tail: bytes left = tail + 4*longs_left. */
		"101: lea 0(%%eax,%0,4),%0\n"
		" jmp 100b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 1b,100b\n"
		" .long 2b,100b\n"
		" .long 3b,100b\n"
		" .long 4b,100b\n"
		" .long 5b,100b\n"
		" .long 6b,100b\n"
		" .long 7b,100b\n"
		" .long 8b,100b\n"
		" .long 9b,100b\n"
		" .long 10b,100b\n"
		" .long 11b,100b\n"
		" .long 12b,100b\n"
		" .long 13b,100b\n"
		" .long 14b,100b\n"
		" .long 15b,100b\n"
		" .long 16b,100b\n"
		" .long 17b,100b\n"
		" .long 18b,100b\n"
		" .long 19b,100b\n"
		" .long 20b,100b\n"
		" .long 21b,100b\n"
		" .long 22b,100b\n"
		" .long 23b,100b\n"
		" .long 24b,100b\n"
		" .long 25b,100b\n"
		" .long 26b,100b\n"
		" .long 27b,100b\n"
		" .long 28b,100b\n"
		" .long 29b,100b\n"
		" .long 30b,100b\n"
		" .long 31b,100b\n"
		" .long 32b,100b\n"
		" .long 33b,100b\n"
		" .long 34b,100b\n"
		" .long 35b,100b\n"
		" .long 36b,100b\n"
		" .long 37b,100b\n"
		" .long 99b,101b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
/*
 * __copy_user_zeroing_intel - copy @size bytes from user @from to kernel
 * @to, using the same 64-byte unrolled scheme as __copy_user_intel().
 * The difference is the fault fixup at 16:, which zero-fills the
 * untouched remainder of the kernel buffer with "rep stosb" so no stale
 * kernel data is exposed to the caller; the movsl-tail fixup at 9:
 * first converts a remaining-longs count back into bytes.
 *
 * Returns the number of bytes that could NOT be copied; 0 on success.
 */
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movl %%eax, 16(%3)\n"
		" movl %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movl %%eax, 24(%3)\n"
		" movl %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movl %%eax, 32(%3)\n"
		" movl %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movl %%eax, 40(%3)\n"
		" movl %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movl %%eax, 48(%3)\n"
		" movl %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		/* Advance both pointers by 64 and loop while >= 64 remain. */
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		/* Tail: size/4 longs via movsl, then size&3 bytes via movsb. */
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		/* Fault in movsl tail: bytes left = tail + 4*longs_left. */
		"9: lea 0(%%eax,%0,4),%0\n"
		/* Zero the rest of the destination, preserving the count. */
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,16b\n"
		" .long 1b,16b\n"
		" .long 2b,16b\n"
		" .long 21b,16b\n"
		" .long 3b,16b\n"
		" .long 31b,16b\n"
		" .long 4b,16b\n"
		" .long 41b,16b\n"
		" .long 10b,16b\n"
		" .long 51b,16b\n"
		" .long 11b,16b\n"
		" .long 61b,16b\n"
		" .long 12b,16b\n"
		" .long 71b,16b\n"
		" .long 13b,16b\n"
		" .long 81b,16b\n"
		" .long 14b,16b\n"
		" .long 91b,16b\n"
		" .long 6b,9b\n"
		" .long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
  409. #else
/*
 * Leave these declared but undefined.  There should be no references to
 * them in this configuration.
 */
unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size);
  418. #endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */

/*
 * __copy_user(to, from, size) - generic copy using rep movs.
 *
 * Copies of more than 7 bytes first align the destination to an 8-byte
 * boundary with byte moves (4:), then move the bulk as longs (0:) and
 * the last size&3 bytes with movsb (1:).  The fixups convert a fault
 * back into a bytes-remaining count: 5: adds the not-yet-copied
 * remainder after the alignment bytes, 3: turns longs left into
 * 4*longs + tail bytes.  @size ends up holding the number of bytes not
 * copied (0 on success).
 */
#define __copy_user(to,from,size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 2b\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
/*
 * __copy_user_zeroing(to, from, size) - like __copy_user(), but on a
 * fault the fixup at 6: zero-fills the rest of the destination buffer
 * with "rep stosb" before returning, so a partially filled kernel
 * buffer never contains stale data.  Used for user->kernel copies.
 * @size ends up holding the number of bytes not copied (0 on success).
 */
#define __copy_user_zeroing(to,from,size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 6f\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		"6: pushl %0\n" \
		" pushl %%eax\n" \
		" xorl %%eax,%%eax\n" \
		" rep; stosb\n" \
		" popl %%eax\n" \
		" popl %0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,6b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
/*
 * __copy_to_user_ll - low-level kernel->user copy; caller must have
 * already validated the range with access_ok().
 *
 * On CPUs where supervisor-mode writes do not honor the WP bit
 * (boot_cpu_data.wp_works_ok == 0), a direct store could silently write
 * through a read-only (e.g. copy-on-write) user mapping.  In that case
 * each target page is pinned writable via get_user_pages() - which
 * forces any COW break - then mapped with kmap_atomic() and filled with
 * memcpy().  A transient -ENOMEM is retried for pid 1 (init) after
 * waiting for block-device write congestion to ease; any other failure
 * stops the loop, leaving n as the byte count not copied.
 *
 * Otherwise it picks between the generic rep-movs copy and the unrolled
 * Intel variant based on movsl_is_ok().
 *
 * Returns the number of bytes that could not be copied; 0 on success.
 */
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
	BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long )to) < TASK_SIZE) {
		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			/* Copy at most up to the end of the current page. */
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long )to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && current->pid == 1) {
				/* init must not fail here; wait for memory. */
				up_read(&current->mm->mmap_sem);
				blk_congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
  548. unsigned long
  549. __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
  550. {
  551. BUG_ON((long)n < 0);
  552. if (movsl_is_ok(to, from, n))
  553. __copy_user_zeroing(to, from, n);
  554. else
  555. n = __copy_user_zeroing_intel(to, from, n);
  556. return n;
  557. }
  558. /**
  559. * copy_to_user: - Copy a block of data into user space.
  560. * @to: Destination address, in user space.
  561. * @from: Source address, in kernel space.
  562. * @n: Number of bytes to copy.
  563. *
  564. * Context: User context only. This function may sleep.
  565. *
  566. * Copy data from kernel space to user space.
  567. *
  568. * Returns number of bytes that could not be copied.
  569. * On success, this will be zero.
  570. */
  571. unsigned long
  572. copy_to_user(void __user *to, const void *from, unsigned long n)
  573. {
  574. might_sleep();
  575. BUG_ON((long) n < 0);
  576. if (access_ok(VERIFY_WRITE, to, n))
  577. n = __copy_to_user(to, from, n);
  578. return n;
  579. }
  580. EXPORT_SYMBOL(copy_to_user);
  581. /**
  582. * copy_from_user: - Copy a block of data from user space.
  583. * @to: Destination address, in kernel space.
  584. * @from: Source address, in user space.
  585. * @n: Number of bytes to copy.
  586. *
  587. * Context: User context only. This function may sleep.
  588. *
  589. * Copy data from user space to kernel space.
  590. *
  591. * Returns number of bytes that could not be copied.
  592. * On success, this will be zero.
  593. *
  594. * If some data could not be copied, this function will pad the copied
  595. * data to the requested size using zero bytes.
  596. */
  597. unsigned long
  598. copy_from_user(void *to, const void __user *from, unsigned long n)
  599. {
  600. might_sleep();
  601. BUG_ON((long) n < 0);
  602. if (access_ok(VERIFY_READ, from, n))
  603. n = __copy_from_user(to, from, n);
  604. else
  605. memset(to, 0, n);
  606. return n;
  607. }
  608. EXPORT_SYMBOL(copy_from_user);