memcpy.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007  Maciej W. Rozycki
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */
/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non-DMA-coherent
 * systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2
/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
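
/*
 * Editor's sketch of the caller contract, as hypothetical C (the real
 * wrappers live in include/asm-mips/uaccess.h and use the non-standard
 * calling convention noted above, returning the residue in a2):
 *
 *	long n = __copy_user(to, from, len);  // bytes NOT copied
 *	if (n)		// a fault occurred; for copy_from_user the
 *		...	// unwritten tail of 'to' has already been zeroed
 */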
/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contains the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
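
/*
 * Editor's example: if __copy_user enters with src == S, dst == D and a
 * load faults after k bytes have been copied, invariants (2) and (3)
 * give src == S + k and dst == D + k.  The load handler can then derive
 * both the uncopied count (AT - fault address) and the first destination
 * byte still to be cleared (dst + (fault address - src)), exactly as
 * .Ll_exc does below.
 */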
#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous
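
/*
 * Editor's note: EXC(LOAD t0, UNIT(0)(src), .Ll_exc) expands to the
 * load itself under the local label 9, plus an __ex_table entry mapping
 * that instruction's address to the .Ll_exc fixup, so a fault on the
 * load resumes in the handler instead of killing the kernel.
 */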
/*
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD	ld
#define LOADL	ldl
#define LOADR	ldr
#define STOREL	sdl
#define STORER	sdr
#define STORE	sd
#define ADD	daddu
#define SUB	dsubu
#define SRL	dsrl
#define SRA	dsra
#define SLL	dsll
#define SLLV	dsllv
#define SRLV	dsrlv
#define NBYTES	8
#define LOG_NBYTES 3
/*
 * As we are sharing the code base with the mips32 tree (which uses the
 * o32 ABI register definitions), we need to redefine the register
 * definitions from the n64 ABI register naming to the o32 ABI register
 * naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOAD	lw
#define LOADL	lwl
#define LOADR	lwr
#define STOREL	swl
#define STORER	swr
#define STORE	sw
#define ADD	addu
#define SUB	subu
#define SRL	srl
#define SLL	sll
#define SRA	sra
#define SLLV	sllv
#define SRLV	srlv
#define NBYTES	4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST	LOADR
#define LDREST	LOADL
#define STFIRST	STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST	LOADL
#define LDREST	LOADR
#define STFIRST	STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)
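
/*
 * Editor's example (assuming USE_DOUBLE, so NBYTES == 8): FIRST(1) == 8
 * and REST(1) == 15 are the offsets of the first and last byte of the
 * second doubleword "unit", so an LDFIRST/LDREST pair over
 * FIRST(n)/REST(n) assembles one unaligned unit, and ADDRMASK (== 7)
 * extracts a pointer's misalignment within a unit.
 */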
	.text
	.set	noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

/*
 * t6 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
	b	__copy_user_common
	 li	t6, 1
END(__copy_user_inatomic)
/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
	.align	5
LEAF(memcpy)					/* a0=dst a1=src a2=len */
	move	v0, dst				/* return value */
.L__memcpy:
FEXPORT(__copy_user)
	li	t6, 0	/* not inatomic */
__copy_user_common:
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREF(	0, 0(src) )
	PREF(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREF(	0, 1*32(src) )
	PREF(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen
	 and	t0, src, ADDRMASK
	PREF(	0, 2*32(src) )
	PREF(	1, 2*32(dst) )
	bnez	t1, .Ldst_unaligned
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	 SRL	t0, len, LOG_NBYTES+3	# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
	 and	rem, len, (8*NBYTES-1)	# rem = len % (8*NBYTES)
	PREF(	0, 3*32(src) )
	PREF(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
	SUB	len, len, 8*NBYTES
EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
EXC(	LOAD	t7, UNIT(5)(src),	.Ll_exc_copy)
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p8u)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p7u)
EXC(	LOAD	t0, UNIT(6)(src),	.Ll_exc_copy)
EXC(	LOAD	t1, UNIT(7)(src),	.Ll_exc_copy)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
EXC(	STORE	t2, UNIT(-6)(dst),	.Ls_exc_p6u)
EXC(	STORE	t3, UNIT(-5)(dst),	.Ls_exc_p5u)
EXC(	STORE	t4, UNIT(-4)(dst),	.Ls_exc_p4u)
EXC(	STORE	t7, UNIT(-3)(dst),	.Ls_exc_p3u)
EXC(	STORE	t0, UNIT(-2)(dst),	.Ls_exc_p2u)
EXC(	STORE	t1, UNIT(-1)(dst),	.Ls_exc_p1u)
	PREF(	0, 8*32(src) )
	PREF(	1, 8*32(dst) )
	bne	len, rem, 1b
	 nop
	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
	beqz	len, .Ldone
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	 nop
1:
	R10KCBARRIER(0(ra))
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder
	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because we can't assume read access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue MIPS processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
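
	/*
	 * Editor's example (assuming NBYTES == 4, little-endian): for
	 * len == 3, rem == 24 bits are kept and bits == 8 bits are
	 * discarded, so SHIFT_DISCARD (sllv here) pushes the unwanted
	 * fourth byte off the top of t0, and STREST at -1(t1), i.e. at
	 * dst+len-1, writes back exactly the three wanted bytes.
	 */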
#define bits t2
	beqz	len, .Ldone
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1),		.Ls_exc)
	jr	ra
	 move	len, zero
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
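
	/*
	 * Editor's example (assuming NBYTES == 8): if dst & 7 == 3, then
	 * t2 == 8 - 3 == 5 bytes go out through the unaligned STFIRST,
	 * leaving dst aligned.  match == t0 ^ t1 is zero exactly when src
	 * and dst had the same misalignment, in which case src is now
	 * aligned as well and the fast .Lboth_aligned path applies.
	 */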
#define match rem
EXC(	LDFIRST	t3, FIRST(0)(src),	.Ll_exc)
	ADD	t2, zero, NBYTES
EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
	beq	len, t2, .Ldone
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	 ADD	src, src, t2
.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREF(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREF(	1, 3*32(dst) )
1:
	/*
	 * Avoid consecutive LD*'s to the same register since some MIPS
	 * implementations can't issue them in the same cycle.
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * refer to the same unit (unless src is aligned, but it's not).
	 */
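	/*
	 * Editor's note on the claim above: FIRST(N+1) and REST(N) address
	 * the consecutive bytes src+(N+1)*NBYTES and src+(N+1)*NBYTES-1,
	 * which land in the same aligned unit precisely when src is
	 * unaligned; a fault on one then implies a fault on the other, so
	 * the reordering can't change which bytes are seen to fault.
	 */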
	R10KCBARRIER(0(ra))
EXC(	LDFIRST	t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDFIRST	t1, FIRST(1)(src),	.Ll_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src),	.Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
	PREF(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
	PREF(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder
.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	 nop
1:
	R10KCBARRIER(0(ra))
EXC(	LDFIRST	t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	 nop
.Lcopy_bytes:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
EXC(	lb	t0, N(src), .Ll_exc);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone;		\
EXC(	 sb	t0, N(dst), .Ls_exc_p1)

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
EXC(	lb	t0, NBYTES-2(src), .Ll_exc)
	SUB	len, len, 1
	jr	ra
EXC(	 sb	t0, NBYTES-2(dst), .Ls_exc_p1)
.Ldone:
	jr	ra
	 nop
	END(memcpy)
.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	 nop
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lb	t1, 0(src),	.Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOAD	t0, TI_TASK($28)
	 nop
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t6, .Ldone	/* Skip the zeroing part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	 ADD	dst, t0			# compute start address of the clear
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	.set	push
	.set	noat
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
	.set	pop
#endif
	jr	ra
	 nop
#define SEXC(n)					\
	.set	reorder;	/* DADDI_WAR */	\
.Ls_exc_p ## n ## u:				\
	ADD	len, len, n*NBYTES;		\
	jr	ra;				\
	.set	noreorder
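
/*
 * Editor's note: SEXC(n) emits the .Ls_exc_p<n>u store-fault handler.
 * The copy loops subtract a whole block from len before issuing their
 * stores, so when a store faults with n units still unwritten the
 * handler adds n*NBYTES back, restoring len to an upper bound on the
 * bytes not copied, as the spec above requires.
 */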
SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1:
	.set	reorder				/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc:
	jr	ra
	 nop
	.align	5
LEAF(memmove)
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, .L__memcpy
	 move	v0, a0				/* return value */
	beqz	a2, .Lr_out
	END(memmove)
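
/*
 * Editor's example: with src == 100, dst == 104 and len == 16 the two
 * ranges overlap and dst > src, so a forward copy would clobber source
 * bytes before reading them; the overlap test above fails the memcpy
 * check and __rmemcpy below copies from the last byte backwards.
 */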
/* fall through to __rmemcpy */
LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, .Lr_end_bytes_up		# src >= dst
	 nop
	ADD	a0, a2				# dst = dst + len
	ADD	a1, a2				# src = src + len

.Lr_end_bytes:
	R10KCBARRIER(0(ra))
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	SUB	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes
	.set	noreorder

.Lr_out:
	jr	ra
	 move	a2, zero

.Lr_end_bytes_up:
	R10KCBARRIER(0(ra))
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	ADD	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes_up
	.set	noreorder

	jr	ra
	 move	a2, zero
	END(__rmemcpy)