/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE         32
#define D_CACHE_LINE_SIZE       32
#define BTB_FLUSH_SIZE          8
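
/*
 * Quick reference for the CP15 c7 cache operations used in this file
 * (ARMv6 encodings; the ARM1136 TRM is the authoritative source):
 *
 *      mcr     p15, 0, Rd, c7, c5, 0           @ invalidate entire I-cache (Rd = 0)
 *      mcr     p15, 0, Rd, c7, c5, 6           @ invalidate branch target buffer
 *      mcr     p15, 0, Rd, c7, c6, 1           @ invalidate D-cache line by MVA in Rd
 *      mcr     p15, 0, Rd, c7, c10, 1          @ clean D-cache line by MVA
 *      mcr     p15, 0, Rd, c7, c10, 4          @ drain write buffer
 *      mcr     p15, 0, Rd, c7, c14, 0          @ clean+invalidate entire D-cache
 *      mcr     p15, 0, Rd, c7, c14, 1          @ clean+invalidate D-cache line by MVA
 */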

/*
 * v6_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 * This erratum is present in 1136, 1156 and 1176. It does not affect the
 * MPCore.
 *
 * Registers:
 * r0 - set to 0
 * r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
        mov     r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
        mrs     r1, cpsr
        cpsid   ifa                             @ disable interrupts
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        msr     cpsr_cx, r1                     @ restore interrupts
        .rept   11                              @ ARM Ltd recommends at least
        nop                                     @ 11 NOPs
        .endr
#else
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I-cache
#endif
        mov     pc, lr
ENDPROC(v6_flush_icache_all)

/*
 * v6_flush_kern_cache_all()
 *
 * Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
#endif
        mov     pc, lr

/*
 * v6_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 * v6_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
        mov     pc, lr

/*
 * v6_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * v6_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart)
#ifdef HARVARD_CACHE
        bic     r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(  mcr     p15, 0, r0, c7, c10, 1  )       @ clean D line
        add     r0, r0, #CACHE_LINE_SIZE
2:
        cmp     r0, r1
        blo     1b
#endif
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c5, 6           @ invalidate BTB
#endif
        mov     pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, just try the next page.
 */
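/*
 * The USER() wrapper on the mcr above is what routes a fault here: it is
 * expected to record the instruction's address in the __ex_table exception
 * table with label 9001 below as the fixup handler (see the USER() macro in
 * asm/assembler.h), so an access to an unmapped user page resumes at 9001
 * instead of oopsing.
 */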
9001:
        mov     r0, r0, lsr #12
        mov     r0, r0, lsl #12
        add     r0, r0, #4096
        b       2b
 UNWIND(.fnend)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 * v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the region described by addr and size
 * is written back to memory.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v6_flush_kern_dcache_area)
        add     r1, r0, r1
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
#ifdef HARVARD_CACHE
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#endif
        mov     pc, lr

/*
 * v6_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
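/*
 * Note on the entry sequence below: if start or end is not cache-line
 * aligned, the partial line at that boundary is cleaned (written back)
 * before the invalidate loop, so that unrelated data sharing the line
 * with the DMA buffer is not lost when the line is invalidated.
 */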
v6_dma_inv_range:
        tst     r0, #D_CACHE_LINE_SIZE - 1
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcrne   p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        tst     r1, #D_CACHE_LINE_SIZE - 1
        bic     r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D line
#else
        mcrne   p15, 0, r1, c7, c15, 1          @ clean & invalidate unified line
#endif
1:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldr     r2, [r0]                        @ read for ownership
        str     r2, [r0]                        @ write for ownership
#endif
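/*
 * The ldr/str pair above is the "read/write for ownership" sequence used
 * when CONFIG_DMA_CACHE_RWFO is enabled: on SMP ARMv6 (e.g. ARM11 MPCore),
 * cache maintenance by MVA is not broadcast to the other CPUs, so touching
 * the line first migrates it into this CPU's cache, making the maintenance
 * operation below act on the up-to-date copy.
 */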
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D line
#else
        mcr     p15, 0, r0, c7, c7, 1           @ invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * v6_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v6_dma_clean_range:
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldr     r2, [r0]                        @ read for ownership
#endif
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcr     p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * v6_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldr     r2, [r0]                        @ read for ownership
        str     r2, [r0]                        @ write for ownership
#endif
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v6_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
        b       v6_dma_clean_range
#else
        teq     r2, #DMA_TO_DEVICE
        beq     v6_dma_clean_range
        b       v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v6_dma_inv_range
#endif
        mov     pc, lr
ENDPROC(v6_dma_unmap_area)
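/*
 * Direction handling in the two routines above, in short: on map,
 * DMA_FROM_DEVICE invalidates the range and the other directions clean it;
 * on unmap, anything other than DMA_TO_DEVICE is invalidated again to drop
 * lines the CPU may have speculatively fetched while the device owned the
 * buffer. With CONFIG_DMA_CACHE_RWFO the work is done entirely at map time
 * (DMA_BIDIRECTIONAL is cleaned and invalidated) and unmap is a no-op.
 */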

        __INITDATA

        .type   v6_cache_fns, #object
ENTRY(v6_cache_fns)
        .long   v6_flush_icache_all
        .long   v6_flush_kern_cache_all
        .long   v6_flush_user_cache_all
        .long   v6_flush_user_cache_range
        .long   v6_coherent_kern_range
        .long   v6_coherent_user_range
        .long   v6_flush_kern_dcache_area
        .long   v6_dma_map_area
        .long   v6_dma_unmap_area
        .long   v6_dma_flush_range
        .size   v6_cache_fns, . - v6_cache_fns
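/*
 * Note: the entries in v6_cache_fns above must stay in the same order as
 * the members of struct cpu_cache_fns (arch/arm/include/asm/cacheflush.h),
 * since the table is used verbatim to fill in the per-CPU cache function
 * pointers; reordering it would silently wire cache calls to the wrong
 * routines.
 */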