/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
                return 0;
#endif
        return 1;
}
#define movsl_is_ok(a1, a2, n) \
        __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
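
/*
 * Illustration (the concrete mask value is an assumption for the example;
 * the real value is set at boot by the CPU setup code): with
 * movsl_mask.mask == 7, a 256-byte copy from 0x1003 to 0x2003 may use
 * movsl because both addresses are equal mod 8 (the xor masks to zero),
 * while 0x1003 to 0x2004 fails the test and falls back to the unrolled
 * Intel copy routines below.
 */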
/*
 * Zero Userspace
 */
#define __do_clear_user(addr, size) \
do { \
        int __d0; \
        might_fault(); \
        __asm__ __volatile__( \
                "0:     rep; stosl\n" \
                "       movl %2,%0\n" \
                "1:     rep; stosb\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3:     lea 0(%2,%0,4),%0\n" \
                "       jmp 2b\n" \
                ".previous\n" \
                _ASM_EXTABLE(0b,3b) \
                _ASM_EXTABLE(1b,2b) \
                : "=&c"(size), "=&D" (__d0) \
                : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
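
/*
 * The fixup at label 3: turns a fault during "rep; stosl" back into a
 * byte count: %0 still holds the number of longwords left and %2 holds
 * the trailing byte count (size & 3), so the lea computes %2 + %0 * 4,
 * i.e. the number of bytes that could not be cleared.
 */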
/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n))
                __do_clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
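
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * zero a user buffer and report the conventional -EFAULT if any bytes
 * were left uncleared:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */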
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
        __do_clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(__clear_user);
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                "       .align 2,0x90\n"
                "1:     movl 32(%4), %%eax\n"
                "       cmpl $67, %0\n"
                "       jbe 3f\n"
                "2:     movl 64(%4), %%eax\n"
                "       .align 2,0x90\n"
                "3:     movl 0(%4), %%eax\n"
                "4:     movl 4(%4), %%edx\n"
                "5:     movl %%eax, 0(%3)\n"
                "6:     movl %%edx, 4(%3)\n"
                "7:     movl 8(%4), %%eax\n"
                "8:     movl 12(%4),%%edx\n"
                "9:     movl %%eax, 8(%3)\n"
                "10:    movl %%edx, 12(%3)\n"
                "11:    movl 16(%4), %%eax\n"
                "12:    movl 20(%4), %%edx\n"
                "13:    movl %%eax, 16(%3)\n"
                "14:    movl %%edx, 20(%3)\n"
                "15:    movl 24(%4), %%eax\n"
                "16:    movl 28(%4), %%edx\n"
                "17:    movl %%eax, 24(%3)\n"
                "18:    movl %%edx, 28(%3)\n"
                "19:    movl 32(%4), %%eax\n"
                "20:    movl 36(%4), %%edx\n"
                "21:    movl %%eax, 32(%3)\n"
                "22:    movl %%edx, 36(%3)\n"
                "23:    movl 40(%4), %%eax\n"
                "24:    movl 44(%4), %%edx\n"
                "25:    movl %%eax, 40(%3)\n"
                "26:    movl %%edx, 44(%3)\n"
                "27:    movl 48(%4), %%eax\n"
                "28:    movl 52(%4), %%edx\n"
                "29:    movl %%eax, 48(%3)\n"
                "30:    movl %%edx, 52(%3)\n"
                "31:    movl 56(%4), %%eax\n"
                "32:    movl 60(%4), %%edx\n"
                "33:    movl %%eax, 56(%3)\n"
                "34:    movl %%edx, 60(%3)\n"
                "       addl $-64, %0\n"
                "       addl $64, %4\n"
                "       addl $64, %3\n"
                "       cmpl $63, %0\n"
                "       ja 1b\n"
                "35:    movl %0, %%eax\n"
                "       shrl $2, %0\n"
                "       andl $3, %%eax\n"
                "       cld\n"
                "99:    rep; movsl\n"
                "36:    movl %%eax, %0\n"
                "37:    rep; movsb\n"
                "100:\n"
                ".section .fixup,\"ax\"\n"
                "101:   lea 0(%%eax,%0,4),%0\n"
                "       jmp 100b\n"
                ".previous\n"
                _ASM_EXTABLE(1b,100b)
                _ASM_EXTABLE(2b,100b)
                _ASM_EXTABLE(3b,100b)
                _ASM_EXTABLE(4b,100b)
                _ASM_EXTABLE(5b,100b)
                _ASM_EXTABLE(6b,100b)
                _ASM_EXTABLE(7b,100b)
                _ASM_EXTABLE(8b,100b)
                _ASM_EXTABLE(9b,100b)
                _ASM_EXTABLE(10b,100b)
                _ASM_EXTABLE(11b,100b)
                _ASM_EXTABLE(12b,100b)
                _ASM_EXTABLE(13b,100b)
                _ASM_EXTABLE(14b,100b)
                _ASM_EXTABLE(15b,100b)
                _ASM_EXTABLE(16b,100b)
                _ASM_EXTABLE(17b,100b)
                _ASM_EXTABLE(18b,100b)
                _ASM_EXTABLE(19b,100b)
                _ASM_EXTABLE(20b,100b)
                _ASM_EXTABLE(21b,100b)
                _ASM_EXTABLE(22b,100b)
                _ASM_EXTABLE(23b,100b)
                _ASM_EXTABLE(24b,100b)
                _ASM_EXTABLE(25b,100b)
                _ASM_EXTABLE(26b,100b)
                _ASM_EXTABLE(27b,100b)
                _ASM_EXTABLE(28b,100b)
                _ASM_EXTABLE(29b,100b)
                _ASM_EXTABLE(30b,100b)
                _ASM_EXTABLE(31b,100b)
                _ASM_EXTABLE(32b,100b)
                _ASM_EXTABLE(33b,100b)
                _ASM_EXTABLE(34b,100b)
                _ASM_EXTABLE(35b,100b)
                _ASM_EXTABLE(36b,100b)
                _ASM_EXTABLE(37b,100b)
                _ASM_EXTABLE(99b,101b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}
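
/*
 * Note on the loop head above: the loads at 1: and 2: read 32 and 64
 * bytes ahead of the current source position only to touch the next
 * cache lines before the 64-byte unrolled block runs; the value loaded
 * into %eax is discarded. Since those reads can cross into an unmapped
 * page, they carry their own exception-table entries.
 */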
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                "       .align 2,0x90\n"
                "0:     movl 32(%4), %%eax\n"
                "       cmpl $67, %0\n"
                "       jbe 2f\n"
                "1:     movl 64(%4), %%eax\n"
                "       .align 2,0x90\n"
                "2:     movl 0(%4), %%eax\n"
                "21:    movl 4(%4), %%edx\n"
                "       movl %%eax, 0(%3)\n"
                "       movl %%edx, 4(%3)\n"
                "3:     movl 8(%4), %%eax\n"
                "31:    movl 12(%4),%%edx\n"
                "       movl %%eax, 8(%3)\n"
                "       movl %%edx, 12(%3)\n"
                "4:     movl 16(%4), %%eax\n"
                "41:    movl 20(%4), %%edx\n"
                "       movl %%eax, 16(%3)\n"
                "       movl %%edx, 20(%3)\n"
                "10:    movl 24(%4), %%eax\n"
                "51:    movl 28(%4), %%edx\n"
                "       movl %%eax, 24(%3)\n"
                "       movl %%edx, 28(%3)\n"
                "11:    movl 32(%4), %%eax\n"
                "61:    movl 36(%4), %%edx\n"
                "       movl %%eax, 32(%3)\n"
                "       movl %%edx, 36(%3)\n"
                "12:    movl 40(%4), %%eax\n"
                "71:    movl 44(%4), %%edx\n"
                "       movl %%eax, 40(%3)\n"
                "       movl %%edx, 44(%3)\n"
                "13:    movl 48(%4), %%eax\n"
                "81:    movl 52(%4), %%edx\n"
                "       movl %%eax, 48(%3)\n"
                "       movl %%edx, 52(%3)\n"
                "14:    movl 56(%4), %%eax\n"
                "91:    movl 60(%4), %%edx\n"
                "       movl %%eax, 56(%3)\n"
                "       movl %%edx, 60(%3)\n"
                "       addl $-64, %0\n"
                "       addl $64, %4\n"
                "       addl $64, %3\n"
                "       cmpl $63, %0\n"
                "       ja 0b\n"
                "5:     movl %0, %%eax\n"
                "       shrl $2, %0\n"
                "       andl $3, %%eax\n"
                "       cld\n"
                "6:     rep; movsl\n"
                "       movl %%eax,%0\n"
                "7:     rep; movsb\n"
                "8:\n"
                ".section .fixup,\"ax\"\n"
                "9:     lea 0(%%eax,%0,4),%0\n"
                "16:    pushl %0\n"
                "       pushl %%eax\n"
                "       xorl %%eax,%%eax\n"
                "       rep; stosb\n"
                "       popl %%eax\n"
                "       popl %0\n"
                "       jmp 8b\n"
                ".previous\n"
                _ASM_EXTABLE(0b,16b)
                _ASM_EXTABLE(1b,16b)
                _ASM_EXTABLE(2b,16b)
                _ASM_EXTABLE(21b,16b)
                _ASM_EXTABLE(3b,16b)
                _ASM_EXTABLE(31b,16b)
                _ASM_EXTABLE(4b,16b)
                _ASM_EXTABLE(41b,16b)
                _ASM_EXTABLE(10b,16b)
                _ASM_EXTABLE(51b,16b)
                _ASM_EXTABLE(11b,16b)
                _ASM_EXTABLE(61b,16b)
                _ASM_EXTABLE(12b,16b)
                _ASM_EXTABLE(71b,16b)
                _ASM_EXTABLE(13b,16b)
                _ASM_EXTABLE(81b,16b)
                _ASM_EXTABLE(14b,16b)
                _ASM_EXTABLE(91b,16b)
                _ASM_EXTABLE(6b,9b)
                _ASM_EXTABLE(7b,16b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}
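
/*
 * "Zeroing" here refers to the fixup at label 16: on a faulting load it
 * clears the rest of the kernel destination buffer with
 * "xorl %%eax,%%eax; rep; stosb", so a short copy from user space never
 * leaves stale kernel data in the tail. The pushl/popl pairs preserve
 * the residual byte count that is returned in %0.
 */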
/*
 * Non-temporal-hint version of __copy_user_zeroing_intel. It is cache
 * aware: the movnti stores below bypass the cache, so bulk copies do
 * not evict other useful data.
 * hyoshiok@miraclelinux.com
 */
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
                                const void __user *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                "       .align 2,0x90\n"
                "0:     movl 32(%4), %%eax\n"
                "       cmpl $67, %0\n"
                "       jbe 2f\n"
                "1:     movl 64(%4), %%eax\n"
                "       .align 2,0x90\n"
                "2:     movl 0(%4), %%eax\n"
                "21:    movl 4(%4), %%edx\n"
                "       movnti %%eax, 0(%3)\n"
                "       movnti %%edx, 4(%3)\n"
                "3:     movl 8(%4), %%eax\n"
                "31:    movl 12(%4),%%edx\n"
                "       movnti %%eax, 8(%3)\n"
                "       movnti %%edx, 12(%3)\n"
                "4:     movl 16(%4), %%eax\n"
                "41:    movl 20(%4), %%edx\n"
                "       movnti %%eax, 16(%3)\n"
                "       movnti %%edx, 20(%3)\n"
                "10:    movl 24(%4), %%eax\n"
                "51:    movl 28(%4), %%edx\n"
                "       movnti %%eax, 24(%3)\n"
                "       movnti %%edx, 28(%3)\n"
                "11:    movl 32(%4), %%eax\n"
                "61:    movl 36(%4), %%edx\n"
                "       movnti %%eax, 32(%3)\n"
                "       movnti %%edx, 36(%3)\n"
                "12:    movl 40(%4), %%eax\n"
                "71:    movl 44(%4), %%edx\n"
                "       movnti %%eax, 40(%3)\n"
                "       movnti %%edx, 44(%3)\n"
                "13:    movl 48(%4), %%eax\n"
                "81:    movl 52(%4), %%edx\n"
                "       movnti %%eax, 48(%3)\n"
                "       movnti %%edx, 52(%3)\n"
                "14:    movl 56(%4), %%eax\n"
                "91:    movl 60(%4), %%edx\n"
                "       movnti %%eax, 56(%3)\n"
                "       movnti %%edx, 60(%3)\n"
                "       addl $-64, %0\n"
                "       addl $64, %4\n"
                "       addl $64, %3\n"
                "       cmpl $63, %0\n"
                "       ja 0b\n"
                "       sfence \n"
                "5:     movl %0, %%eax\n"
                "       shrl $2, %0\n"
                "       andl $3, %%eax\n"
                "       cld\n"
                "6:     rep; movsl\n"
                "       movl %%eax,%0\n"
                "7:     rep; movsb\n"
                "8:\n"
                ".section .fixup,\"ax\"\n"
                "9:     lea 0(%%eax,%0,4),%0\n"
                "16:    pushl %0\n"
                "       pushl %%eax\n"
                "       xorl %%eax,%%eax\n"
                "       rep; stosb\n"
                "       popl %%eax\n"
                "       popl %0\n"
                "       jmp 8b\n"
                ".previous\n"
                _ASM_EXTABLE(0b,16b)
                _ASM_EXTABLE(1b,16b)
                _ASM_EXTABLE(2b,16b)
                _ASM_EXTABLE(21b,16b)
                _ASM_EXTABLE(3b,16b)
                _ASM_EXTABLE(31b,16b)
                _ASM_EXTABLE(4b,16b)
                _ASM_EXTABLE(41b,16b)
                _ASM_EXTABLE(10b,16b)
                _ASM_EXTABLE(51b,16b)
                _ASM_EXTABLE(11b,16b)
                _ASM_EXTABLE(61b,16b)
                _ASM_EXTABLE(12b,16b)
                _ASM_EXTABLE(71b,16b)
                _ASM_EXTABLE(13b,16b)
                _ASM_EXTABLE(81b,16b)
                _ASM_EXTABLE(14b,16b)
                _ASM_EXTABLE(91b,16b)
                _ASM_EXTABLE(6b,9b)
                _ASM_EXTABLE(7b,16b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}
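
/*
 * movnti is a weakly ordered, write-combining store. The sfence issued
 * after the unrolled loop makes the non-temporal stores globally
 * visible before the ordinary movsl/movsb tail copy runs and before
 * the function returns.
 */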
static unsigned long __copy_user_intel_nocache(void *to,
                                const void __user *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                "       .align 2,0x90\n"
                "0:     movl 32(%4), %%eax\n"
                "       cmpl $67, %0\n"
                "       jbe 2f\n"
                "1:     movl 64(%4), %%eax\n"
                "       .align 2,0x90\n"
                "2:     movl 0(%4), %%eax\n"
                "21:    movl 4(%4), %%edx\n"
                "       movnti %%eax, 0(%3)\n"
                "       movnti %%edx, 4(%3)\n"
                "3:     movl 8(%4), %%eax\n"
                "31:    movl 12(%4),%%edx\n"
                "       movnti %%eax, 8(%3)\n"
                "       movnti %%edx, 12(%3)\n"
                "4:     movl 16(%4), %%eax\n"
                "41:    movl 20(%4), %%edx\n"
                "       movnti %%eax, 16(%3)\n"
                "       movnti %%edx, 20(%3)\n"
                "10:    movl 24(%4), %%eax\n"
                "51:    movl 28(%4), %%edx\n"
                "       movnti %%eax, 24(%3)\n"
                "       movnti %%edx, 28(%3)\n"
                "11:    movl 32(%4), %%eax\n"
                "61:    movl 36(%4), %%edx\n"
                "       movnti %%eax, 32(%3)\n"
                "       movnti %%edx, 36(%3)\n"
                "12:    movl 40(%4), %%eax\n"
                "71:    movl 44(%4), %%edx\n"
                "       movnti %%eax, 40(%3)\n"
                "       movnti %%edx, 44(%3)\n"
                "13:    movl 48(%4), %%eax\n"
                "81:    movl 52(%4), %%edx\n"
                "       movnti %%eax, 48(%3)\n"
                "       movnti %%edx, 52(%3)\n"
                "14:    movl 56(%4), %%eax\n"
                "91:    movl 60(%4), %%edx\n"
                "       movnti %%eax, 56(%3)\n"
                "       movnti %%edx, 60(%3)\n"
                "       addl $-64, %0\n"
                "       addl $64, %4\n"
                "       addl $64, %3\n"
                "       cmpl $63, %0\n"
                "       ja 0b\n"
                "       sfence \n"
                "5:     movl %0, %%eax\n"
                "       shrl $2, %0\n"
                "       andl $3, %%eax\n"
                "       cld\n"
                "6:     rep; movsl\n"
                "       movl %%eax,%0\n"
                "7:     rep; movsb\n"
                "8:\n"
                ".section .fixup,\"ax\"\n"
                "9:     lea 0(%%eax,%0,4),%0\n"
                "16:    jmp 8b\n"
                ".previous\n"
                _ASM_EXTABLE(0b,16b)
                _ASM_EXTABLE(1b,16b)
                _ASM_EXTABLE(2b,16b)
                _ASM_EXTABLE(21b,16b)
                _ASM_EXTABLE(3b,16b)
                _ASM_EXTABLE(31b,16b)
                _ASM_EXTABLE(4b,16b)
                _ASM_EXTABLE(41b,16b)
                _ASM_EXTABLE(10b,16b)
                _ASM_EXTABLE(51b,16b)
                _ASM_EXTABLE(11b,16b)
                _ASM_EXTABLE(61b,16b)
                _ASM_EXTABLE(12b,16b)
                _ASM_EXTABLE(71b,16b)
                _ASM_EXTABLE(13b,16b)
                _ASM_EXTABLE(81b,16b)
                _ASM_EXTABLE(14b,16b)
                _ASM_EXTABLE(91b,16b)
                _ASM_EXTABLE(6b,9b)
                _ASM_EXTABLE(7b,16b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}
#else
/*
 * Leave these declared but undefined.  There should be no references to
 * them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
                                        unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
                                        unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
                                const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size) \
do { \
        int __d0, __d1, __d2; \
        __asm__ __volatile__( \
                "       cmp $7,%0\n" \
                "       jbe 1f\n" \
                "       movl %1,%0\n" \
                "       negl %0\n" \
                "       andl $7,%0\n" \
                "       subl %0,%3\n" \
                "4:     rep; movsb\n" \
                "       movl %3,%0\n" \
                "       shrl $2,%0\n" \
                "       andl $3,%3\n" \
                "       .align 2,0x90\n" \
                "0:     rep; movsl\n" \
                "       movl %3,%0\n" \
                "1:     rep; movsb\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "5:     addl %3,%0\n" \
                "       jmp 2b\n" \
                "3:     lea 0(%3,%0,4),%0\n" \
                "       jmp 2b\n" \
                ".previous\n" \
                _ASM_EXTABLE(4b,5b) \
                _ASM_EXTABLE(0b,3b) \
                _ASM_EXTABLE(1b,2b) \
                : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
                : "3"(size), "0"(size), "1"(to), "2"(from) \
                : "memory"); \
} while (0)
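
/*
 * Flow of the code above: for copies longer than 7 bytes, "negl %0;
 * andl $7,%0" computes how many bytes are needed to bring the
 * destination up to 8-byte alignment. Those bytes are copied with movsb
 * at 4:, the bulk then moves as longwords at 0:, and the final 0-3
 * bytes go through movsb at 1:.
 */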
#define __copy_user_zeroing(to, from, size) \
do { \
        int __d0, __d1, __d2; \
        __asm__ __volatile__( \
                "       cmp $7,%0\n" \
                "       jbe 1f\n" \
                "       movl %1,%0\n" \
                "       negl %0\n" \
                "       andl $7,%0\n" \
                "       subl %0,%3\n" \
                "4:     rep; movsb\n" \
                "       movl %3,%0\n" \
                "       shrl $2,%0\n" \
                "       andl $3,%3\n" \
                "       .align 2,0x90\n" \
                "0:     rep; movsl\n" \
                "       movl %3,%0\n" \
                "1:     rep; movsb\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "5:     addl %3,%0\n" \
                "       jmp 6f\n" \
                "3:     lea 0(%3,%0,4),%0\n" \
                "6:     pushl %0\n" \
                "       pushl %%eax\n" \
                "       xorl %%eax,%%eax\n" \
                "       rep; stosb\n" \
                "       popl %%eax\n" \
                "       popl %0\n" \
                "       jmp 2b\n" \
                ".previous\n" \
                _ASM_EXTABLE(4b,5b) \
                _ASM_EXTABLE(0b,3b) \
                _ASM_EXTABLE(1b,6b) \
                : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
                : "3"(size), "0"(size), "1"(to), "2"(from) \
                : "memory"); \
} while (0)
unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
        if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
                        ((unsigned long)to) < TASK_SIZE) {
                /*
                 * When we are in an atomic section (see
                 * mm/filemap.c:file_read_actor), return the full
                 * length to take the slow path.
                 */
                if (in_atomic())
                        return n;

                /*
                 * The CPU does not honor the WP bit when writing from
                 * supervisor mode, and due to preemption or SMP the
                 * page tables can change at any time, so do it
                 * manually.  Manfred <manfred@colorfullife.com>
                 */
                while (n) {
                        unsigned long offset = ((unsigned long)to) % PAGE_SIZE;
                        unsigned long len = PAGE_SIZE - offset;
                        int retval;
                        struct page *pg;
                        void *maddr;

                        if (len > n)
                                len = n;

survive:
                        down_read(&current->mm->mmap_sem);
                        retval = get_user_pages(current, current->mm,
                                        (unsigned long)to, 1, 1, 0, &pg, NULL);

                        if (retval == -ENOMEM && is_global_init(current)) {
                                up_read(&current->mm->mmap_sem);
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto survive;
                        }

                        if (retval != 1) {
                                up_read(&current->mm->mmap_sem);
                                break;
                        }

                        maddr = kmap_atomic(pg);
                        memcpy(maddr + offset, from, len);
                        kunmap_atomic(maddr);
                        set_page_dirty_lock(pg);
                        put_page(pg);
                        up_read(&current->mm->mmap_sem);

                        from += len;
                        to += len;
                        n -= len;
                }
                return n;
        }
#endif
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
        return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
unsigned long __copy_from_user_ll(void *to, const void __user *from,
                                unsigned long n)
{
        if (movsl_is_ok(to, from, n))
                __copy_user_zeroing(to, from, n);
        else
                n = __copy_user_zeroing_intel(to, from, n);
        return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);
unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
                                unsigned long n)
{
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel((void __user *)to,
                                      (const void *)from, n);
        return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
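
/*
 * Contrast with __copy_from_user_ll above: on a fault the _nozero
 * variant leaves the destination tail untouched instead of zero-filling
 * it, for callers that deal with a short copy themselves.
 */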
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
                                unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_zeroing_intel_nocache(to, from, n);
        else
                __copy_user_zeroing(to, from, n);
#else
        __copy_user_zeroing(to, from, n);
#endif
        return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_intel_nocache(to, from, n);
        else
                __copy_user(to, from, n);
#else
        __copy_user(to, from, n);
#endif
        return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
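
/*
 * Both nocache variants take the non-temporal path only for copies
 * larger than 64 bytes on CPUs with SSE2 (cpu_has_xmm2; movnti is an
 * SSE2 instruction). Presumably smaller copies gain little from
 * bypassing the cache, so they use the generic __copy_user paths.
 */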
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
EXPORT_SYMBOL(copy_to_user);
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}
EXPORT_SYMBOL(_copy_from_user);
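
/*
 * Minimal usage sketch (hypothetical caller; copy_from_user() is the
 * header wrapper around _copy_from_user):
 *
 *      struct my_args karg;    // hypothetical structure
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 *
 * Per the zero-padding rule documented above, even on a partial fault
 * the uncopied tail of karg is zeroed rather than left holding stale
 * kernel data.
 */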
void copy_from_user_overflow(void)
{
        WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);