/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
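
/*
 * Added note (not in the original source): the XOR test above only
 * rejects movsl when the two pointers are mutually misaligned within
 * the masked low bits.  For example, with movsl_mask.mask == 7,
 * a1 == 0x1000 and a2 == 0x2004 give (a1 ^ a2) & 7 == 4, so a copy of
 * 64 bytes or more falls back to the unrolled Intel variant instead of
 * rep; movsl.
 */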

/*
 * Copy a null terminated string from userspace.
 */
#define __do_strncpy_from_user(dst, src, count, res)	\
do {	\
	int __d0, __d1, __d2;	\
	might_fault();	\
	__asm__ __volatile__(	\
		"	testl %1,%1\n"	\
		"	jz 2f\n"	\
		"0:	lodsb\n"	\
		"	stosb\n"	\
		"	testb %%al,%%al\n"	\
		"	jz 1f\n"	\
		"	decl %1\n"	\
		"	jnz 0b\n"	\
		"1:	subl %1,%0\n"	\
		"2:\n"	\
		".section .fixup,\"ax\"\n"	\
		"3:	movl %5,%0\n"	\
		"	jmp 2b\n"	\
		".previous\n"	\
		_ASM_EXTABLE(0b,3b)	\
		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),	\
		  "=&D" (__d2)	\
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory");	\
} while (0)

/**
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;

	if (access_ok(VERIFY_READ, src, 1))
		__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(strncpy_from_user);
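
/*
 * Usage sketch (illustrative only, not part of this file): copying a
 * user-supplied name into a fixed kernel buffer.  "uname" stands in
 * for a hypothetical __user pointer argument.
 *
 *	char kbuf[64];
 *	long len = strncpy_from_user(kbuf, uname, sizeof(kbuf));
 *
 *	if (len < 0)
 *		return len;	// -EFAULT: faulted in userspace
 *	if (len == sizeof(kbuf))
 *		return -EINVAL;	// truncated: kbuf is not NUL-terminated
 */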

/*
 * Zero Userspace
 */
#define __do_clear_user(addr, size)	\
do {	\
	int __d0;	\
	might_fault();	\
	__asm__ __volatile__(	\
		"0:	rep; stosl\n"	\
		"	movl %2,%0\n"	\
		"1:	rep; stosb\n"	\
		"2:\n"	\
		".section .fixup,\"ax\"\n"	\
		"3:	lea 0(%2,%0,4),%0\n"	\
		"	jmp 2b\n"	\
		".previous\n"	\
		_ASM_EXTABLE(0b,3b)	\
		_ASM_EXTABLE(1b,2b)	\
		: "=&c"(size), "=&D" (__d0)	\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
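
/*
 * Usage sketch (illustrative only, not part of this file): zeroing the
 * unwritten tail of a user buffer after a short transfer.  "buf",
 * "done" and "want" are assumed locals of a hypothetical caller.
 *
 *	if (done < want && clear_user(buf + done, want - done))
 *		return -EFAULT;	// nonzero return = bytes left uncleared
 */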

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	__asm__ __volatile__(
		"	testl %0, %0\n"
		"	jz 3f\n"
		"	andl %0,%%ecx\n"
		"0:	repne; scasb\n"
		"	setne %%al\n"
		"	subl %%ecx,%0\n"
		"	addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2:	xorl %%eax,%%eax\n"
		"	jmp 1b\n"
		"3:	movb $1,%%al\n"
		"	jmp 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,2b\n"
		".previous"
		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
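
/*
 * Usage sketch (illustrative only, not part of this file): sizing a
 * user string before copying it.  "ustr" is an assumed __user pointer;
 * note the return value counts the trailing NUL.
 *
 *	long len = strnlen_user(ustr, PAGE_SIZE);
 *
 *	if (!len)
 *		return -EFAULT;	// faulted while scanning
 *	if (len > PAGE_SIZE)
 *		return -E2BIG;	// no NUL within the limit
 */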

#ifdef CONFIG_X86_INTEL_USERCOPY
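/*
 * Added note (not in the original source): the three Intel variants
 * below move data in unrolled 64-byte blocks using paired 32-bit loads
 * and stores; the single loads at offsets 32 and 64 at the top of each
 * loop touch ahead so those cache lines are resident by the time the
 * block is copied.  The sub-64-byte tail is finished with
 * rep; movsl / rep; movsb.
 */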
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"1:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 3f\n"
		"2:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"3:	movl 0(%4), %%eax\n"
		"4:	movl 4(%4), %%edx\n"
		"5:	movl %%eax, 0(%3)\n"
		"6:	movl %%edx, 4(%3)\n"
		"7:	movl 8(%4), %%eax\n"
		"8:	movl 12(%4),%%edx\n"
		"9:	movl %%eax, 8(%3)\n"
		"10:	movl %%edx, 12(%3)\n"
		"11:	movl 16(%4), %%eax\n"
		"12:	movl 20(%4), %%edx\n"
		"13:	movl %%eax, 16(%3)\n"
		"14:	movl %%edx, 20(%3)\n"
		"15:	movl 24(%4), %%eax\n"
		"16:	movl 28(%4), %%edx\n"
		"17:	movl %%eax, 24(%3)\n"
		"18:	movl %%edx, 28(%3)\n"
		"19:	movl 32(%4), %%eax\n"
		"20:	movl 36(%4), %%edx\n"
		"21:	movl %%eax, 32(%3)\n"
		"22:	movl %%edx, 36(%3)\n"
		"23:	movl 40(%4), %%eax\n"
		"24:	movl 44(%4), %%edx\n"
		"25:	movl %%eax, 40(%3)\n"
		"26:	movl %%edx, 44(%3)\n"
		"27:	movl 48(%4), %%eax\n"
		"28:	movl 52(%4), %%edx\n"
		"29:	movl %%eax, 48(%3)\n"
		"30:	movl %%edx, 52(%3)\n"
		"31:	movl 56(%4), %%eax\n"
		"32:	movl 60(%4), %%edx\n"
		"33:	movl %%eax, 56(%3)\n"
		"34:	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 1b\n"
		"35:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"99:	rep; movsl\n"
		"36:	movl %%eax, %0\n"
		"37:	rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101:	lea 0(%%eax,%0,4),%0\n"
		"	jmp 100b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,100b\n"
		"	.long 2b,100b\n"
		"	.long 3b,100b\n"
		"	.long 4b,100b\n"
		"	.long 5b,100b\n"
		"	.long 6b,100b\n"
		"	.long 7b,100b\n"
		"	.long 8b,100b\n"
		"	.long 9b,100b\n"
		"	.long 10b,100b\n"
		"	.long 11b,100b\n"
		"	.long 12b,100b\n"
		"	.long 13b,100b\n"
		"	.long 14b,100b\n"
		"	.long 15b,100b\n"
		"	.long 16b,100b\n"
		"	.long 17b,100b\n"
		"	.long 18b,100b\n"
		"	.long 19b,100b\n"
		"	.long 20b,100b\n"
		"	.long 21b,100b\n"
		"	.long 22b,100b\n"
		"	.long 23b,100b\n"
		"	.long 24b,100b\n"
		"	.long 25b,100b\n"
		"	.long 26b,100b\n"
		"	.long 27b,100b\n"
		"	.long 28b,100b\n"
		"	.long 29b,100b\n"
		"	.long 30b,100b\n"
		"	.long 31b,100b\n"
		"	.long 32b,100b\n"
		"	.long 33b,100b\n"
		"	.long 34b,100b\n"
		"	.long 35b,100b\n"
		"	.long 36b,100b\n"
		"	.long 37b,100b\n"
		"	.long 99b,101b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
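
/*
 * Added note (not in the original source): unlike __copy_user_intel
 * above, the _zeroing variant's fault fixup (labels 9/16 below) zeroes
 * the rest of the kernel destination with rep; stosb, so a fault while
 * reading userspace never leaves uninitialized kernel memory behind.
 */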
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movl %%eax, 0(%3)\n"
		"	movl %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movl %%eax, 8(%3)\n"
		"	movl %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movl %%eax, 16(%3)\n"
		"	movl %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movl %%eax, 24(%3)\n"
		"	movl %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movl %%eax, 32(%3)\n"
		"	movl %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movl %%eax, 40(%3)\n"
		"	movl %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movl %%eax, 48(%3)\n"
		"	movl %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movl %%eax, 56(%3)\n"
		"	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel.  It is
 * cache aware.
 * hyoshiok@miraclelinux.com
 */
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
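
/*
 * Added note (not in the original source): both _nocache variants (the
 * one above and the one below) store with movnti, which bypasses the
 * cache; the sfence after the main loop makes those weakly-ordered
 * non-temporal stores globally visible before the tail copy and the
 * return.
 */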
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
#else
/*
 * Leave these declared but undefined.  There should be no references
 * to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size)	\
do {	\
	int __d0, __d1, __d2;	\
	__asm__ __volatile__(	\
		"	cmp $7,%0\n"	\
		"	jbe 1f\n"	\
		"	movl %1,%0\n"	\
		"	negl %0\n"	\
		"	andl $7,%0\n"	\
		"	subl %0,%3\n"	\
		"4:	rep; movsb\n"	\
		"	movl %3,%0\n"	\
		"	shrl $2,%0\n"	\
		"	andl $3,%3\n"	\
		"	.align 2,0x90\n"	\
		"0:	rep; movsl\n"	\
		"	movl %3,%0\n"	\
		"1:	rep; movsb\n"	\
		"2:\n"	\
		".section .fixup,\"ax\"\n"	\
		"5:	addl %3,%0\n"	\
		"	jmp 2b\n"	\
		"3:	lea 0(%3,%0,4),%0\n"	\
		"	jmp 2b\n"	\
		".previous\n"	\
		".section __ex_table,\"a\"\n"	\
		"	.align 4\n"	\
		"	.long 4b,5b\n"	\
		"	.long 0b,3b\n"	\
		"	.long 1b,2b\n"	\
		".previous"	\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)	\
		: "memory");	\
} while (0)
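
/*
 * Added note (not in the original source): __copy_user above first
 * copies up to 7 leading bytes with movsb so the destination becomes
 * 8-byte aligned, then moves the bulk with rep; movsl and the last
 * 0-3 bytes with rep; movsb.  __copy_user_zeroing below is identical
 * except that its fault fixup zero-fills the remaining destination.
 */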

#define __copy_user_zeroing(to, from, size)	\
do {	\
	int __d0, __d1, __d2;	\
	__asm__ __volatile__(	\
		"	cmp $7,%0\n"	\
		"	jbe 1f\n"	\
		"	movl %1,%0\n"	\
		"	negl %0\n"	\
		"	andl $7,%0\n"	\
		"	subl %0,%3\n"	\
		"4:	rep; movsb\n"	\
		"	movl %3,%0\n"	\
		"	shrl $2,%0\n"	\
		"	andl $3,%3\n"	\
		"	.align 2,0x90\n"	\
		"0:	rep; movsl\n"	\
		"	movl %3,%0\n"	\
		"1:	rep; movsb\n"	\
		"2:\n"	\
		".section .fixup,\"ax\"\n"	\
		"5:	addl %3,%0\n"	\
		"	jmp 6f\n"	\
		"3:	lea 0(%3,%0,4),%0\n"	\
		"6:	pushl %0\n"	\
		"	pushl %%eax\n"	\
		"	xorl %%eax,%%eax\n"	\
		"	rep; stosb\n"	\
		"	popl %%eax\n"	\
		"	popl %0\n"	\
		"	jmp 2b\n"	\
		".previous\n"	\
		".section __ex_table,\"a\"\n"	\
		"	.align 4\n"	\
		"	.long 4b,5b\n"	\
		"	.long 0b,3b\n"	\
		"	.long 1b,6b\n"	\
		".previous"	\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)	\
		: "memory");	\
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
				unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					  unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
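
/*
 * Added note (not in the original source): the two _nocache wrappers
 * above take the movnti path only for copies larger than 64 bytes on
 * SSE2-capable CPUs (cpu_has_xmm2): movnti is an SSE2 instruction, and
 * shorter copies would not amortize the trailing sfence.
 */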

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
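
/*
 * Usage sketch (illustrative only, not part of this file): returning a
 * result structure to userspace, e.g. from an ioctl handler.  "argp"
 * and "struct my_result" are hypothetical.
 *
 *	struct my_result res = { 0 };
 *
 *	if (copy_to_user(argp, &res, sizeof(res)))
 *		return -EFAULT;	// nonzero return = bytes left uncopied
 */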

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(copy_from_user);
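
/*
 * Usage sketch (illustrative only, not part of this file): reading a
 * request structure from userspace, e.g. in a write() handler.
 * "ubuf", "count" and "struct my_req" are hypothetical.
 *
 *	struct my_req req;
 *
 *	if (count < sizeof(req))
 *		return -EINVAL;
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;	// short copy: req was zero-padded
 */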