usercopy_32.c

/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
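
/*
 * Worked example (editorial illustration, not part of the original
 * file): with movsl_mask.mask == 7, movsl_is_ok() asks whether source
 * and destination agree in their low three address bits for copies of
 * 64 bytes or more; short copies always take the movsl path:
 *
 *	movsl_is_ok(0x1003, 0x2003, 4096) -> 1  ((0x1003 ^ 0x2003) & 7 == 0)
 *	movsl_is_ok(0x1003, 0x2007, 4096) -> 0  ((0x1003 ^ 0x2007) & 7 == 4)
 *	movsl_is_ok(0x1003, 0x2007, 32)   -> 1  (n < 64)
 */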

/*
 * Zero Userspace
 */
#define __do_clear_user(addr, size)				\
do {								\
	int __d0;						\
	might_fault();						\
	__asm__ __volatile__(					\
		"0:	rep; stosl\n"				\
		"	movl %2,%0\n"				\
		"1:	rep; stosb\n"				\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	lea 0(%2,%0,4),%0\n"			\
		"	jmp 2b\n"				\
		".previous\n"					\
		_ASM_EXTABLE(0b,3b)				\
		_ASM_EXTABLE(1b,2b)				\
		: "=&c"(size), "=&D" (__d0)			\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)
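
/*
 * Fixup arithmetic, illustrated (editorial example, not original
 * code): if "rep; stosl" faults with %ecx longwords still to go, the
 * fixup at 3: reports remaining_longs * 4 + tail_bytes via
 * "lea 0(%2,%0,4),%0". For size == 4099 (1024 longs plus a 3-byte
 * tail), a fault with 100 longs unwritten returns 100 * 4 + 3 = 403
 * bytes not cleared.
 */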

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
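
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * zeroing the unwritten tail of a user buffer. A nonzero return is
 * conventionally mapped to -EFAULT:
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */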

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	__asm__ __volatile__(
		"	testl %0, %0\n"
		"	jz 3f\n"
		"	andl %0,%%ecx\n"
		"0:	repne; scasb\n"
		"	setne %%al\n"
		"	subl %%ecx,%0\n"
		"	addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2:	xorl %%eax,%%eax\n"
		"	jmp 1b\n"
		"3:	movb $1,%%al\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(0b,2b)
		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
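
/*
 * Usage sketch (hypothetical caller, not part of this file): sizing a
 * user string before copying it in. The three return cases follow the
 * kernel-doc above:
 *
 *	len = strnlen_user(ustr, PATH_MAX);
 *	if (len == 0)			// faulted: bad user pointer
 *		return -EFAULT;
 *	if (len > PATH_MAX)		// no NUL within the limit
 *		return -ENAMETOOLONG;
 *	// otherwise len counts the string INCLUDING its terminating NUL
 */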

#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"1:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 3f\n"
		"2:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"3:	movl 0(%4), %%eax\n"
		"4:	movl 4(%4), %%edx\n"
		"5:	movl %%eax, 0(%3)\n"
		"6:	movl %%edx, 4(%3)\n"
		"7:	movl 8(%4), %%eax\n"
		"8:	movl 12(%4),%%edx\n"
		"9:	movl %%eax, 8(%3)\n"
		"10:	movl %%edx, 12(%3)\n"
		"11:	movl 16(%4), %%eax\n"
		"12:	movl 20(%4), %%edx\n"
		"13:	movl %%eax, 16(%3)\n"
		"14:	movl %%edx, 20(%3)\n"
		"15:	movl 24(%4), %%eax\n"
		"16:	movl 28(%4), %%edx\n"
		"17:	movl %%eax, 24(%3)\n"
		"18:	movl %%edx, 28(%3)\n"
		"19:	movl 32(%4), %%eax\n"
		"20:	movl 36(%4), %%edx\n"
		"21:	movl %%eax, 32(%3)\n"
		"22:	movl %%edx, 36(%3)\n"
		"23:	movl 40(%4), %%eax\n"
		"24:	movl 44(%4), %%edx\n"
		"25:	movl %%eax, 40(%3)\n"
		"26:	movl %%edx, 44(%3)\n"
		"27:	movl 48(%4), %%eax\n"
		"28:	movl 52(%4), %%edx\n"
		"29:	movl %%eax, 48(%3)\n"
		"30:	movl %%edx, 52(%3)\n"
		"31:	movl 56(%4), %%eax\n"
		"32:	movl 60(%4), %%edx\n"
		"33:	movl %%eax, 56(%3)\n"
		"34:	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 1b\n"
		"35:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"99:	rep; movsl\n"
		"36:	movl %%eax, %0\n"
		"37:	rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101:	lea 0(%%eax,%0,4),%0\n"
		"	jmp 100b\n"
		".previous\n"
		_ASM_EXTABLE(1b,100b)
		_ASM_EXTABLE(2b,100b)
		_ASM_EXTABLE(3b,100b)
		_ASM_EXTABLE(4b,100b)
		_ASM_EXTABLE(5b,100b)
		_ASM_EXTABLE(6b,100b)
		_ASM_EXTABLE(7b,100b)
		_ASM_EXTABLE(8b,100b)
		_ASM_EXTABLE(9b,100b)
		_ASM_EXTABLE(10b,100b)
		_ASM_EXTABLE(11b,100b)
		_ASM_EXTABLE(12b,100b)
		_ASM_EXTABLE(13b,100b)
		_ASM_EXTABLE(14b,100b)
		_ASM_EXTABLE(15b,100b)
		_ASM_EXTABLE(16b,100b)
		_ASM_EXTABLE(17b,100b)
		_ASM_EXTABLE(18b,100b)
		_ASM_EXTABLE(19b,100b)
		_ASM_EXTABLE(20b,100b)
		_ASM_EXTABLE(21b,100b)
		_ASM_EXTABLE(22b,100b)
		_ASM_EXTABLE(23b,100b)
		_ASM_EXTABLE(24b,100b)
		_ASM_EXTABLE(25b,100b)
		_ASM_EXTABLE(26b,100b)
		_ASM_EXTABLE(27b,100b)
		_ASM_EXTABLE(28b,100b)
		_ASM_EXTABLE(29b,100b)
		_ASM_EXTABLE(30b,100b)
		_ASM_EXTABLE(31b,100b)
		_ASM_EXTABLE(32b,100b)
		_ASM_EXTABLE(33b,100b)
		_ASM_EXTABLE(34b,100b)
		_ASM_EXTABLE(35b,100b)
		_ASM_EXTABLE(36b,100b)
		_ASM_EXTABLE(37b,100b)
		_ASM_EXTABLE(99b,101b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movl %%eax, 0(%3)\n"
		"	movl %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movl %%eax, 8(%3)\n"
		"	movl %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movl %%eax, 16(%3)\n"
		"	movl %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movl %%eax, 24(%3)\n"
		"	movl %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movl %%eax, 32(%3)\n"
		"	movl %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movl %%eax, 40(%3)\n"
		"	movl %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movl %%eax, 48(%3)\n"
		"	movl %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movl %%eax, 56(%3)\n"
		"	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel. It is cache
 * aware.
 * hyoshiok@miraclelinux.com
 */
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

#else
/*
 * Leave these declared but undefined. There should not be any
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size)				\
do {								\
	int __d0, __d1, __d2;					\
	__asm__ __volatile__(					\
		"	cmp $7,%0\n"				\
		"	jbe 1f\n"				\
		"	movl %1,%0\n"				\
		"	negl %0\n"				\
		"	andl $7,%0\n"				\
		"	subl %0,%3\n"				\
		"4:	rep; movsb\n"				\
		"	movl %3,%0\n"				\
		"	shrl $2,%0\n"				\
		"	andl $3,%3\n"				\
		"	.align 2,0x90\n"			\
		"0:	rep; movsl\n"				\
		"	movl %3,%0\n"				\
		"1:	rep; movsb\n"				\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"5:	addl %3,%0\n"				\
		"	jmp 2b\n"				\
		"3:	lea 0(%3,%0,4),%0\n"			\
		"	jmp 2b\n"				\
		".previous\n"					\
		_ASM_EXTABLE(4b,5b)				\
		_ASM_EXTABLE(0b,3b)				\
		_ASM_EXTABLE(1b,2b)				\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)	\
		: "memory");					\
} while (0)

#define __copy_user_zeroing(to, from, size)			\
do {								\
	int __d0, __d1, __d2;					\
	__asm__ __volatile__(					\
		"	cmp $7,%0\n"				\
		"	jbe 1f\n"				\
		"	movl %1,%0\n"				\
		"	negl %0\n"				\
		"	andl $7,%0\n"				\
		"	subl %0,%3\n"				\
		"4:	rep; movsb\n"				\
		"	movl %3,%0\n"				\
		"	shrl $2,%0\n"				\
		"	andl $3,%3\n"				\
		"	.align 2,0x90\n"			\
		"0:	rep; movsl\n"				\
		"	movl %3,%0\n"				\
		"1:	rep; movsb\n"				\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"5:	addl %3,%0\n"				\
		"	jmp 6f\n"				\
		"3:	lea 0(%3,%0,4),%0\n"			\
		"6:	pushl %0\n"				\
		"	pushl %%eax\n"				\
		"	xorl %%eax,%%eax\n"			\
		"	rep; stosb\n"				\
		"	popl %%eax\n"				\
		"	popl %0\n"				\
		"	jmp 2b\n"				\
		".previous\n"					\
		_ASM_EXTABLE(4b,5b)				\
		_ASM_EXTABLE(0b,3b)				\
		_ASM_EXTABLE(1b,6b)				\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)	\
		: "memory");					\
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.  Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
					unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
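
/*
 * Note on the _nocache variants (editorial summary): for copies larger
 * than 64 bytes on SSE2-capable CPUs (cpu_has_xmm2) they store with
 * movnti, which bypasses the cache, so bulk copies do not evict hot
 * cache lines. A hypothetical caller streaming data it will not read
 * back soon:
 *
 *	left = __copy_from_user_ll_nocache_nozero(dst, ubuf, len);
 *	if (left)
 *		return -EFAULT;		// len - left bytes were copied
 */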

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
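
/*
 * Usage sketch (hypothetical read() handler, not part of this file):
 * any shortfall is reported as -EFAULT rather than a partial count:
 *
 *	if (copy_to_user(ubuf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */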

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);
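
/*
 * Usage sketch (hypothetical ioctl/write() handler, not part of this
 * file). On a partial fault the destination is zero-padded to @n
 * bytes, so the kernel buffer is fully initialized either way:
 *
 *	struct some_req req;
 *	if (_copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */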

void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);