/*
 * linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"
/*
 * v7_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 * r0 - set to 0
 */
ENTRY(v7_flush_icache_all)
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	mov	pc, lr
ENDPROC(v7_flush_icache_all)
/*
 * v7_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
loop1:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
#ifdef CONFIG_PREEMPT
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum way number (associativity - 1)
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ find maximum set (index) number
loop2:
	mov	r9, r4				@ create working copy of max way size
loop3:
 ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 THUMB(	lsl	r6, r9, r5		)
 THUMB(	orr	r11, r10, r6	)		@ factor way and cache number into r11
 ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
 THUMB(	lsl	r6, r7, r2	)
 THUMB(	orr	r11, r11, r6	)		@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the way
	bge	loop3
	subs	r7, r7, #1			@ decrement the index
	bge	loop2
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_flush_dcache_all)
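
/*
 * A rough C model of the set/way walk above, included only as a reading
 * aid; the helper names (read_clidr, read_ccsidr, write_csselr, dccisw)
 * are illustrative and are not kernel APIs. It shows how loop1/loop2/loop3
 * assemble the DCCISW operand for every data/unified cache level up to
 * the Level of Coherency:
 *
 *	for (level = 0; level < loc; level++) {
 *		unsigned int ctype = (read_clidr() >> (level * 3)) & 7;
 *		if (ctype < 2)				// no cache, or I-cache only
 *			continue;
 *		write_csselr(level << 1);		// select D/unified cache at this level
 *		isb();
 *		ccsidr     = read_ccsidr();
 *		line_shift = (ccsidr & 7) + 4;		// log2(line size in bytes)
 *		max_way    = (ccsidr >> 3) & 0x3ff;	// associativity - 1
 *		max_set    = (ccsidr >> 13) & 0x7fff;	// number of sets - 1
 *		way_shift  = clz(max_way);		// ways live in the top bits
 *		for (set = max_set; set >= 0; set--)
 *			for (way = max_way; way >= 0; way--)
 *				dccisw((way << way_shift) |
 *				       (set << line_shift) |
 *				       (level << 1));	// clean & invalidate by set/way
 *	}
 */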
/*
 * v7_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is now achieved using atomic clean / invalidates
 * working outwards from L1 cache. This is done using Set/Way based cache
 * maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(v7_flush_kern_cache_all)
 ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	v7_flush_dcache_all
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	mov	pc, lr
ENDPROC(v7_flush_kern_cache_all)
/*
 * v7_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
	/*FALLTHROUGH*/
/*
 * v7_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v7_flush_user_cache_range)
	mov	pc, lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
/*
 * v7_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
	/* FALLTHROUGH */
/*
 * v7_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
 UNWIND(.fnstart	)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
 USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
3:
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
	dsb
	isb
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r12
 * isn't mapped, just try the next page.
 */
9001:
	mov	r12, r12, lsr #12
	mov	r12, r12, lsl #12
	add	r12, r12, #4096
	b	3b
 UNWIND(.fnend		)
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
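
/*
 * A rough C model of the by-MVA sequence above, as a reading aid only;
 * the helper names (dccmvau, icimvau, bpiall, ...) mirror the CP15
 * operation mnemonics and are not kernel APIs:
 *
 *	void coherent_range(unsigned long start, unsigned long end)
 *	{
 *		unsigned long d = dcache_line_size(), i = icache_line_size();
 *		unsigned long addr;
 *
 *		for (addr = start & ~(d - 1); addr < end; addr += d)
 *			dccmvau(addr);		// clean D line to point of unification
 *		dsb();
 *		for (addr = start & ~(i - 1); addr < end; addr += i)
 *			icimvau(addr);		// invalidate I line to point of unification
 *		bpiall();			// invalidate branch predictor
 *		dsb();
 *		isb();
 *	}
 */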
/*
 * v7_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the region described by addr and size
 * is written back to memory.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v7_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_flush_kern_dcache_area)
/*
 * v7_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
v7_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line

	tst	r1, r3
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
1:
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_inv_range)
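
/*
 * Note on the two mcrne instructions above: a buffer handed to DMA may
 * share its first and last cache lines with unrelated data, so when
 * start or end is not line aligned those boundary lines are cleaned &
 * invalidated rather than simply invalidated, which would discard the
 * neighbouring bytes. A rough C model, with illustrative helper names
 * that are not kernel APIs:
 *
 *	void dma_inv_range(unsigned long start, unsigned long end)
 *	{
 *		unsigned long mask = dcache_line_size() - 1, addr;
 *
 *		if (start & mask)
 *			dccimvac(start & ~mask);	// clean+inv partial first line
 *		if (end & mask)
 *			dccimvac(end & ~mask);		// clean+inv partial last line
 *		for (addr = start & ~mask; addr < (end & ~mask); addr += mask + 1)
 *			dcimvac(addr);			// invalidate whole lines
 *		dsb();
 *	}
 */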
/*
 * v7_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
v7_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_clean_range)
/*
 * v7_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_flush_range)
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(v7_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7_dma_inv_range
	b	v7_dma_clean_range
ENDPROC(v7_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(v7_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7_dma_inv_range
	mov	pc, lr
ENDPROC(v7_dma_unmap_area)
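
/*
 * A rough C model of the direction handling in v7_dma_map_area and
 * v7_dma_unmap_area above (reading aid only, not a kernel API):
 *
 *	void dma_map_area(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			dma_inv_range(start, start + size);	// device writes: drop stale CPU lines
 *		else
 *			dma_clean_range(start, start + size);	// device reads: push dirty lines to memory
 *	}
 *
 *	void dma_unmap_area(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			dma_inv_range(start, start + size);	// drop lines fetched while DMA was in flight
 *	}
 */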
	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7