/*
 * Optimized memory copy routines.
 *
 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Portions derived from the GNU C Library
 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
 *
 * Several strategies are tried to get the best performance for various
 * conditions.  In the optimal case, we copy 64 bytes in an unrolled loop
 * using fp regs.  This is followed by loops that copy 32 or 16 bytes at a
 * time using general registers.  Unaligned copies are handled either by
 * aligning the destination and then using the shift-and-write method, or
 * in a few cases by falling back to a byte-at-a-time copy.
 *
 * I chose to implement this in C because it is easier to maintain and debug,
 * and in my experiments it appears that the C code generated by gcc (3.3/3.4
 * at the time of writing) is fairly optimal.  Unfortunately some of the
 * semantics of the copy routine (exception handling) are difficult to
 * express in C, so we have to play some tricks to get it to work.
 *
 * All the loads and stores are done via explicit asm() code in order to use
 * the right space registers.
 *
 * Testing with various alignments and buffer sizes shows that this code is
 * often >10x faster than a simple byte-at-a-time copy, even for strangely
 * aligned operands.  It is interesting to note that the glibc version of
 * memcpy (written in C) is actually quite fast already.  This routine is
 * able to beat it by 30-40% for aligned copies because of the loop
 * unrolling, but in some cases the glibc version is still slightly faster.
 * This lends more credibility to the claim that gcc can generate very good
 * code as long as we are careful.
 *
 * TODO:
 * - cache prefetching needs more experimentation to get optimal settings
 * - try not to use the post-increment address modifiers; they create
 *   additional interlocks
 * - replace byte-copy loops with stybs sequences
 */

#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#define s_space "%%sr1"
#define d_space "%%sr2"
#else
#include "memcpy.h"
#define s_space "%%sr0"
#define d_space "%%sr0"
#define pa_memcpy new2_copy
#endif

DECLARE_PER_CPU(struct exception_data, exception_data);

#define preserve_branch(label) do { \
	volatile int dummy; \
	/* The following branch is never taken, it's just here to */ \
	/* prevent gcc from optimizing away our exception code. */ \
	if (unlikely(dummy != dummy)) \
		goto label; \
} while (0)

#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
#define get_kernel_space() (0)
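
/*
 * Address spaces: get_kernel_space() is space id 0; get_user_space()
 * reads the current user space id from %sr3 (unless the fs segment is
 * KERNEL_DS, in which case kernel space 0 is used).  The copy_to_user/
 * copy_from_user/copy_in_user wrappers at the bottom of this file load
 * these ids into %sr1 and %sr2, which is where s_space and d_space
 * above point the loads and stores.
 */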

#define MERGE(w0, sh_1, w1, sh_2) ({ \
	unsigned int _r; \
	asm volatile ( \
		"mtsar %3\n" \
		"shrpw %1, %2, %%sar, %0\n" \
		: "=r"(_r) \
		: "r"(w0), "r"(w1), "r"(sh_2) \
	); \
	_r; \
})
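
/*
 * Illustrative sketch only, not used by the copy loops below: a
 * hypothetical portable-C model, merge_ref(), of what MERGE computes.
 * shrpw shifts the 64-bit concatenation w0:w1 right by %sar (= sh_2)
 * bits and keeps the low word, i.e. it joins the tail of w0 with the
 * head of w1.  copy_dstaligned() is only reached with a word-misaligned
 * src, so sh_1 and sh_2 stay in {8, 16, 24} and the C shifts below are
 * well defined.
 */
static inline unsigned int merge_ref(unsigned int w0, int sh_1,
				     unsigned int w1, int sh_2)
{
	/* big-endian shift-and-merge, as in the glibc MERGE macro */
	return (w0 << sh_1) | (w1 >> sh_2);
}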

#define THRESHOLD 16

#ifdef DEBUG_MEMCPY
#define DPRINTF(fmt, args...) do { \
	printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__); \
	printk(KERN_DEBUG fmt, ##args); \
} while (0)
#else
#define DPRINTF(fmt, args...)
#endif

#ifndef __LP64__
#define EXC_WORD ".word"
#else
#define EXC_WORD ".dword"
#endif
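
/*
 * The macros below emit a single load or store together with an
 * __ex_table entry: the first word is the address of the access, the
 * second the address of a fixup label (the _e argument, e.g.
 * pmc_load_exc).  If the access faults, execution resumes at that
 * label, where the copy routine works out how many bytes were not
 * transferred instead of oopsing.
 */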
#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
	__asm__ __volatile__ ( \
	"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n" \
	"\t.section __ex_table,\"aw\"\n" \
	"\t" EXC_WORD "\t1b\n" \
	"\t" EXC_WORD "\t" #_e "\n" \
	"\t.previous\n" \
	: _tt(_t), "+r"(_a) \
	: \
	: "r8")

#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
	__asm__ __volatile__ ( \
	"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n" \
	"\t.section __ex_table,\"aw\"\n" \
	"\t" EXC_WORD "\t1b\n" \
	"\t" EXC_WORD "\t" #_e "\n" \
	"\t.previous\n" \
	: "+r"(_a) \
	: _tt(_t) \
	: "r8")

#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)

#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
	__asm__ __volatile__ ( \
	"1:\t" #_insn " " #_o "(" _s ",%1), %0\n" \
	"\t.section __ex_table,\"aw\"\n" \
	"\t" EXC_WORD "\t1b\n" \
	"\t" EXC_WORD "\t" #_e "\n" \
	"\t.previous\n" \
	: _tt(_t) \
	: "r"(_a) \
	: "r8")

#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
	__asm__ __volatile__ ( \
	"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n" \
	"\t.section __ex_table,\"aw\"\n" \
	"\t" EXC_WORD "\t1b\n" \
	"\t" EXC_WORD "\t" #_e "\n" \
	"\t.previous\n" \
	: \
	: _tt(_t), "r"(_a) \
	: "r8")

#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)

#ifdef CONFIG_PREFETCH
extern inline void prefetch_src(const void *addr)
{
        __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
}

extern inline void prefetch_dst(const void *addr)
{
        __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
}
#else
#define prefetch_src(addr)
#define prefetch_dst(addr)
#endif
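
/*
 * Note: a load whose target is %r0 is the PA-RISC idiom for a cache
 * prefetch hint; the line is fetched (on implementations that honour
 * the hint) but no register is written.  The prefetch distance itself
 * is still listed as a TODO in the header comment.
 */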

/* Copy from a not-aligned src to an aligned dst, using shifts.  Handles 4
 * words per loop.  This code is derived from glibc.
 */
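/*
 * Example: with src % 4 == 1 we get sh_1 = 8 and sh_2 = 24, so each word
 * stored to dst merges the low three bytes of one (rounded-down) source
 * word with the high byte of the next one.
 */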
static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src,
                                        unsigned long len, unsigned long o_dst,
                                        unsigned long o_src, unsigned long o_len)
{
        /* gcc complains that a2 and a3 may be uninitialized, but actually
         * they cannot be.  Initialize a2/a3 to shut gcc up.
         */
        register unsigned int a0, a1, a2 = 0, a3 = 0;
        int sh_1, sh_2;
        struct exception_data *d;

        /* prefetch_src((const void *)src); */

        /* Calculate how to shift a word read at the memory operation
           aligned srcp to make it aligned for copy. */
        sh_1 = 8 * (src % sizeof(unsigned int));
        sh_2 = 8 * sizeof(unsigned int) - sh_1;

        /* Make src aligned by rounding it down. */
        src &= -sizeof(unsigned int);

        switch (len % 4)
        {
                case 2:
                        /* a1 = ((unsigned int *) src)[0];
                           a2 = ((unsigned int *) src)[1]; */
                        ldw(s_space, 0, src, a1, cda_ldw_exc);
                        ldw(s_space, 4, src, a2, cda_ldw_exc);
                        src -= 1 * sizeof(unsigned int);
                        dst -= 3 * sizeof(unsigned int);
                        len += 2;
                        goto do1;

                case 3:
                        /* a0 = ((unsigned int *) src)[0];
                           a1 = ((unsigned int *) src)[1]; */
                        ldw(s_space, 0, src, a0, cda_ldw_exc);
                        ldw(s_space, 4, src, a1, cda_ldw_exc);
                        src -= 0 * sizeof(unsigned int);
                        dst -= 2 * sizeof(unsigned int);
                        len += 1;
                        goto do2;

                case 0:
                        if (len == 0)
                                return 0;
                        /* a3 = ((unsigned int *) src)[0];
                           a0 = ((unsigned int *) src)[1]; */
                        ldw(s_space, 0, src, a3, cda_ldw_exc);
                        ldw(s_space, 4, src, a0, cda_ldw_exc);
                        src -= -1 * sizeof(unsigned int);
                        dst -= 1 * sizeof(unsigned int);
                        len += 0;
                        goto do3;

                case 1:
                        /* a2 = ((unsigned int *) src)[0];
                           a3 = ((unsigned int *) src)[1]; */
                        ldw(s_space, 0, src, a2, cda_ldw_exc);
                        ldw(s_space, 4, src, a3, cda_ldw_exc);
                        src -= -2 * sizeof(unsigned int);
                        dst -= 0 * sizeof(unsigned int);
                        len -= 1;
                        if (len == 0)
                                goto do0;
                        goto do4;                       /* No-op.  */
        }

        do
        {
                /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
do4:
                /* a0 = ((unsigned int *) src)[0]; */
                ldw(s_space, 0, src, a0, cda_ldw_exc);
                /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
                stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
do3:
                /* a1 = ((unsigned int *) src)[1]; */
                ldw(s_space, 4, src, a1, cda_ldw_exc);
                /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
                stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
do2:
                /* a2 = ((unsigned int *) src)[2]; */
                ldw(s_space, 8, src, a2, cda_ldw_exc);
                /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
                stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
do1:
                /* a3 = ((unsigned int *) src)[3]; */
                ldw(s_space, 12, src, a3, cda_ldw_exc);
                /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
                stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);

                src += 4 * sizeof(unsigned int);
                dst += 4 * sizeof(unsigned int);
                len -= 4;
        }
        while (len != 0);

do0:
        /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
        stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);

        preserve_branch(handle_load_error);
        preserve_branch(handle_store_error);

        return 0;

handle_load_error:
        __asm__ __volatile__ ("cda_ldw_exc:\n");
        d = &__get_cpu_var(exception_data);
        DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
                o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
        return o_len * 4 - d->fault_addr + o_src;

handle_store_error:
        __asm__ __volatile__ ("cda_stw_exc:\n");
        d = &__get_cpu_var(exception_data);
        DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
                o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
        return o_len * 4 - d->fault_addr + o_dst;
}

/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
{
        register unsigned long src, dst, t1, t2, t3;
        register unsigned char *pcs, *pcd;
        register unsigned int *pws, *pwd;
        register double *pds, *pdd;
        unsigned long ret = 0;
        unsigned long o_dst, o_src, o_len;
        struct exception_data *d;

        src = (unsigned long)srcp;
        dst = (unsigned long)dstp;
        pcs = (unsigned char *)srcp;
        pcd = (unsigned char *)dstp;

        o_dst = dst; o_src = src; o_len = len;

        /* prefetch_src((const void *)srcp); */

        if (len < THRESHOLD)
                goto byte_copy;

        /* Check alignment */
        t1 = (src ^ dst);
        if (unlikely(t1 & (sizeof(double)-1)))
                goto unaligned_copy;

        /* src and dst have same alignment. */

        /* Copy bytes till we are double-aligned. */
        t2 = src & (sizeof(double) - 1);
        if (unlikely(t2 != 0)) {
                t2 = sizeof(double) - t2;
                while (t2 && len) {
                        /* *pcd++ = *pcs++; */
                        ldbma(s_space, pcs, t3, pmc_load_exc);
                        len--;
                        stbma(d_space, t3, pcd, pmc_store_exc);
                        t2--;
                }
        }

        pds = (double *)pcs;
        pdd = (double *)pcd;

#if 0
        /* Copy 8 doubles at a time */
        while (len >= 8*sizeof(double)) {
                register double r1, r2, r3, r4, r5, r6, r7, r8;
                /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
                flddma(s_space, pds, r1, pmc_load_exc);
                flddma(s_space, pds, r2, pmc_load_exc);
                flddma(s_space, pds, r3, pmc_load_exc);
                flddma(s_space, pds, r4, pmc_load_exc);
                fstdma(d_space, r1, pdd, pmc_store_exc);
                fstdma(d_space, r2, pdd, pmc_store_exc);
                fstdma(d_space, r3, pdd, pmc_store_exc);
                fstdma(d_space, r4, pdd, pmc_store_exc);
#if 0
                if (L1_CACHE_BYTES <= 32)
                        prefetch_src((char *)pds + L1_CACHE_BYTES);
#endif
                flddma(s_space, pds, r5, pmc_load_exc);
                flddma(s_space, pds, r6, pmc_load_exc);
                flddma(s_space, pds, r7, pmc_load_exc);
                flddma(s_space, pds, r8, pmc_load_exc);
                fstdma(d_space, r5, pdd, pmc_store_exc);
                fstdma(d_space, r6, pdd, pmc_store_exc);
                fstdma(d_space, r7, pdd, pmc_store_exc);
                fstdma(d_space, r8, pdd, pmc_store_exc);
                len -= 8*sizeof(double);
        }
#endif

        pws = (unsigned int *)pds;
        pwd = (unsigned int *)pdd;

word_copy:
        while (len >= 8*sizeof(unsigned int)) {
                register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
                /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
                ldwma(s_space, pws, r1, pmc_load_exc);
                ldwma(s_space, pws, r2, pmc_load_exc);
                ldwma(s_space, pws, r3, pmc_load_exc);
                ldwma(s_space, pws, r4, pmc_load_exc);
                stwma(d_space, r1, pwd, pmc_store_exc);
                stwma(d_space, r2, pwd, pmc_store_exc);
                stwma(d_space, r3, pwd, pmc_store_exc);
                stwma(d_space, r4, pwd, pmc_store_exc);

                ldwma(s_space, pws, r5, pmc_load_exc);
                ldwma(s_space, pws, r6, pmc_load_exc);
                ldwma(s_space, pws, r7, pmc_load_exc);
                ldwma(s_space, pws, r8, pmc_load_exc);
                stwma(d_space, r5, pwd, pmc_store_exc);
                stwma(d_space, r6, pwd, pmc_store_exc);
                stwma(d_space, r7, pwd, pmc_store_exc);
                stwma(d_space, r8, pwd, pmc_store_exc);
                len -= 8*sizeof(unsigned int);
        }

        while (len >= 4*sizeof(unsigned int)) {
                register unsigned int r1,r2,r3,r4;
                ldwma(s_space, pws, r1, pmc_load_exc);
                ldwma(s_space, pws, r2, pmc_load_exc);
                ldwma(s_space, pws, r3, pmc_load_exc);
                ldwma(s_space, pws, r4, pmc_load_exc);
                stwma(d_space, r1, pwd, pmc_store_exc);
                stwma(d_space, r2, pwd, pmc_store_exc);
                stwma(d_space, r3, pwd, pmc_store_exc);
                stwma(d_space, r4, pwd, pmc_store_exc);
                len -= 4*sizeof(unsigned int);
        }

        pcs = (unsigned char *)pws;
        pcd = (unsigned char *)pwd;

byte_copy:
        while (len) {
                /* *pcd++ = *pcs++; */
                ldbma(s_space, pcs, t3, pmc_load_exc);
                stbma(d_space, t3, pcd, pmc_store_exc);
                len--;
        }

        return 0;

unaligned_copy:
        /* possibly we are aligned on a word, but not on a double... */
        if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
                t2 = src & (sizeof(unsigned int) - 1);

                if (unlikely(t2 != 0)) {
                        t2 = sizeof(unsigned int) - t2;
                        while (t2) {
                                /* *pcd++ = *pcs++; */
                                ldbma(s_space, pcs, t3, pmc_load_exc);
                                stbma(d_space, t3, pcd, pmc_store_exc);
                                len--;
                                t2--;
                        }
                }

                pws = (unsigned int *)pcs;
                pwd = (unsigned int *)pcd;
                goto word_copy;
        }

        /* Align the destination.  */
        if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
                t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
                while (t2) {
                        /* *pcd++ = *pcs++; */
                        ldbma(s_space, pcs, t3, pmc_load_exc);
                        stbma(d_space, t3, pcd, pmc_store_exc);
                        len--;
                        t2--;
                }
                dst = (unsigned long)pcd;
                src = (unsigned long)pcs;
        }

        ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
                o_dst, o_src, o_len);
        if (ret)
                return ret;

        pcs += (len & -sizeof(unsigned int));
        pcd += (len & -sizeof(unsigned int));
        len %= sizeof(unsigned int);

        preserve_branch(handle_load_error);
        preserve_branch(handle_store_error);

        goto byte_copy;

handle_load_error:
        __asm__ __volatile__ ("pmc_load_exc:\n");
        d = &__get_cpu_var(exception_data);
        DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
                o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
        return o_len - d->fault_addr + o_src;

handle_store_error:
        __asm__ __volatile__ ("pmc_store_exc:\n");
        d = &__get_cpu_var(exception_data);
        DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
                o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
        return o_len - d->fault_addr + o_dst;
}

#ifdef __KERNEL__
unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
{
        mtsp(get_kernel_space(), 1);
        mtsp(get_user_space(), 2);
        return pa_memcpy((void __force *)dst, src, len);
}

unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len)
{
        mtsp(get_user_space(), 1);
        mtsp(get_kernel_space(), 2);
        return pa_memcpy(dst, (void __force *)src, len);
}

unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
{
        mtsp(get_user_space(), 1);
        mtsp(get_user_space(), 2);
        return pa_memcpy((void __force *)dst, (void __force *)src, len);
}

void *memcpy(void *dst, const void *src, size_t count)
{
        mtsp(get_kernel_space(), 1);
        mtsp(get_kernel_space(), 2);
        pa_memcpy(dst, src, count);
        return dst;
}

EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(memcpy);
#endif