@ cache-v6.S — ARMv6 cache maintenance operations
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8
  21. /*
  22. * v6_flush_icache_all()
  23. *
  24. * Flush the whole I-cache.
  25. *
  26. * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
  27. * This erratum is present in 1136, 1156 and 1176. It does not affect the
  28. * MPCore.
  29. *
  30. * Registers:
  31. * r0 - set to 0
  32. * r1 - corrupted
  33. */
  34. ENTRY(v6_flush_icache_all)
  35. mov r0, #0
  36. #ifdef CONFIG_ARM_ERRATA_411920
  37. mrs r1, cpsr
  38. cpsid ifa @ disable interrupts
  39. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  40. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  41. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  42. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  43. msr cpsr_cx, r1 @ restore interrupts
  44. .rept 11 @ ARM Ltd recommends at least
  45. nop @ 11 NOPs
  46. .endr
  47. #else
  48. mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
  49. #endif
  50. mov pc, lr
  51. ENDPROC(v6_flush_icache_all)
  52. /*
  53. * v6_flush_cache_all()
  54. *
  55. * Flush the entire cache.
  56. *
  57. * It is assumed that:
  58. */
  59. ENTRY(v6_flush_kern_cache_all)
  60. mov r0, #0
  61. #ifdef HARVARD_CACHE
  62. mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
  63. #ifndef CONFIG_ARM_ERRATA_411920
  64. mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
  65. #else
  66. b v6_flush_icache_all
  67. #endif
  68. #else
  69. mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
  70. #endif
  71. mov pc, lr
  72. /*
  73. * v6_flush_cache_all()
  74. *
  75. * Flush all TLB entries in a particular address space
  76. *
  77. * - mm - mm_struct describing address space
  78. */
  79. ENTRY(v6_flush_user_cache_all)
  80. /*FALLTHROUGH*/
  81. /*
  82. * v6_flush_cache_range(start, end, flags)
  83. *
  84. * Flush a range of TLB entries in the specified address space.
  85. *
  86. * - start - start address (may not be aligned)
  87. * - end - end address (exclusive, may not be aligned)
  88. * - flags - vm_area_struct flags describing address space
  89. *
  90. * It is assumed that:
  91. * - we have a VIPT cache.
  92. */
  93. ENTRY(v6_flush_user_cache_range)
  94. mov pc, lr
  95. /*
  96. * v6_coherent_kern_range(start,end)
  97. *
  98. * Ensure that the I and D caches are coherent within specified
  99. * region. This is typically used when code has been written to
  100. * a memory region, and will be executed.
  101. *
  102. * - start - virtual start address of region
  103. * - end - virtual end address of region
  104. *
  105. * It is assumed that:
  106. * - the Icache does not read data from the write buffer
  107. */
  108. ENTRY(v6_coherent_kern_range)
  109. /* FALLTHROUGH */
  110. /*
  111. * v6_coherent_user_range(start,end)
  112. *
  113. * Ensure that the I and D caches are coherent within specified
  114. * region. This is typically used when code has been written to
  115. * a memory region, and will be executed.
  116. *
  117. * - start - virtual start address of region
  118. * - end - virtual end address of region
  119. *
  120. * It is assumed that:
  121. * - the Icache does not read data from the write buffer
  122. */
  123. ENTRY(v6_coherent_user_range)
  124. UNWIND(.fnstart )
  125. #ifdef HARVARD_CACHE
  126. bic r0, r0, #CACHE_LINE_SIZE - 1
  127. 1:
  128. USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
  129. add r0, r0, #CACHE_LINE_SIZE
  130. 2:
  131. cmp r0, r1
  132. blo 1b
  133. #endif
  134. mov r0, #0
  135. #ifdef HARVARD_CACHE
  136. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  137. #ifndef CONFIG_ARM_ERRATA_411920
  138. mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
  139. #else
  140. b v6_flush_icache_all
  141. #endif
  142. #else
  143. mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
  144. #endif
  145. mov pc, lr
  146. /*
  147. * Fault handling for the cache operation above. If the virtual address in r0
  148. * isn't mapped, just try the next page.
  149. */
  150. 9001:
  151. mov r0, r0, lsr #12
  152. mov r0, r0, lsl #12
  153. add r0, r0, #4096
  154. b 2b
  155. UNWIND(.fnend )
  156. ENDPROC(v6_coherent_user_range)
  157. ENDPROC(v6_coherent_kern_range)
  158. /*
  159. * v6_flush_kern_dcache_area(void *addr, size_t size)
  160. *
  161. * Ensure that the data held in the page kaddr is written back
  162. * to the page in question.
  163. *
  164. * - addr - kernel address
  165. * - size - region size
  166. */
  167. ENTRY(v6_flush_kern_dcache_area)
  168. add r1, r0, r1
  169. 1:
  170. #ifdef HARVARD_CACHE
  171. mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
  172. #else
  173. mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line
  174. #endif
  175. add r0, r0, #D_CACHE_LINE_SIZE
  176. cmp r0, r1
  177. blo 1b
  178. #ifdef HARVARD_CACHE
  179. mov r0, #0
  180. mcr p15, 0, r0, c7, c10, 4
  181. #endif
  182. mov pc, lr
  183. /*
  184. * v6_dma_inv_range(start,end)
  185. *
  186. * Invalidate the data cache within the specified region; we will
  187. * be performing a DMA operation in this region and we want to
  188. * purge old data in the cache.
  189. *
  190. * - start - virtual start address of region
  191. * - end - virtual end address of region
  192. */
  193. v6_dma_inv_range:
  194. #ifdef CONFIG_DMA_CACHE_RWFO
  195. ldrb r2, [r0] @ read for ownership
  196. strb r2, [r0] @ write for ownership
  197. #endif
  198. tst r0, #D_CACHE_LINE_SIZE - 1
  199. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  200. #ifdef HARVARD_CACHE
  201. mcrne p15, 0, r0, c7, c10, 1 @ clean D line
  202. #else
  203. mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
  204. #endif
  205. tst r1, #D_CACHE_LINE_SIZE - 1
  206. #ifdef CONFIG_DMA_CACHE_RWFO
  207. ldrneb r2, [r1, #-1] @ read for ownership
  208. strneb r2, [r1, #-1] @ write for ownership
  209. #endif
  210. bic r1, r1, #D_CACHE_LINE_SIZE - 1
  211. #ifdef HARVARD_CACHE
  212. mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
  213. #else
  214. mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
  215. #endif
  216. 1:
  217. #ifdef HARVARD_CACHE
  218. mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
  219. #else
  220. mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
  221. #endif
  222. add r0, r0, #D_CACHE_LINE_SIZE
  223. cmp r0, r1
  224. #ifdef CONFIG_DMA_CACHE_RWFO
  225. ldrlo r2, [r0] @ read for ownership
  226. strlo r2, [r0] @ write for ownership
  227. #endif
  228. blo 1b
  229. mov r0, #0
  230. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  231. mov pc, lr
  232. /*
  233. * v6_dma_clean_range(start,end)
  234. * - start - virtual start address of region
  235. * - end - virtual end address of region
  236. */
  237. v6_dma_clean_range:
  238. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  239. 1:
  240. #ifdef CONFIG_DMA_CACHE_RWFO
  241. ldr r2, [r0] @ read for ownership
  242. #endif
  243. #ifdef HARVARD_CACHE
  244. mcr p15, 0, r0, c7, c10, 1 @ clean D line
  245. #else
  246. mcr p15, 0, r0, c7, c11, 1 @ clean unified line
  247. #endif
  248. add r0, r0, #D_CACHE_LINE_SIZE
  249. cmp r0, r1
  250. blo 1b
  251. mov r0, #0
  252. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  253. mov pc, lr
  254. /*
  255. * v6_dma_flush_range(start,end)
  256. * - start - virtual start address of region
  257. * - end - virtual end address of region
  258. */
  259. ENTRY(v6_dma_flush_range)
  260. #ifdef CONFIG_DMA_CACHE_RWFO
  261. ldrb r2, [r0] @ read for ownership
  262. strb r2, [r0] @ write for ownership
  263. #endif
  264. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  265. 1:
  266. #ifdef HARVARD_CACHE
  267. mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
  268. #else
  269. mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
  270. #endif
  271. add r0, r0, #D_CACHE_LINE_SIZE
  272. cmp r0, r1
  273. #ifdef CONFIG_DMA_CACHE_RWFO
  274. ldrlob r2, [r0] @ read for ownership
  275. strlob r2, [r0] @ write for ownership
  276. #endif
  277. blo 1b
  278. mov r0, #0
  279. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  280. mov pc, lr
  281. /*
  282. * dma_map_area(start, size, dir)
  283. * - start - kernel virtual start address
  284. * - size - size of region
  285. * - dir - DMA direction
  286. */
  287. ENTRY(v6_dma_map_area)
  288. add r1, r1, r0
  289. teq r2, #DMA_FROM_DEVICE
  290. beq v6_dma_inv_range
  291. #ifndef CONFIG_DMA_CACHE_RWFO
  292. b v6_dma_clean_range
  293. #else
  294. teq r2, #DMA_TO_DEVICE
  295. beq v6_dma_clean_range
  296. b v6_dma_flush_range
  297. #endif
  298. ENDPROC(v6_dma_map_area)
  299. /*
  300. * dma_unmap_area(start, size, dir)
  301. * - start - kernel virtual start address
  302. * - size - size of region
  303. * - dir - DMA direction
  304. */
  305. ENTRY(v6_dma_unmap_area)
  306. #ifndef CONFIG_DMA_CACHE_RWFO
  307. add r1, r1, r0
  308. teq r2, #DMA_TO_DEVICE
  309. bne v6_dma_inv_range
  310. #endif
  311. mov pc, lr
  312. ENDPROC(v6_dma_unmap_area)
  313. __INITDATA
  314. .type v6_cache_fns, #object
  315. ENTRY(v6_cache_fns)
  316. .long v6_flush_icache_all
  317. .long v6_flush_kern_cache_all
  318. .long v6_flush_user_cache_all
  319. .long v6_flush_user_cache_range
  320. .long v6_coherent_kern_range
  321. .long v6_coherent_user_range
  322. .long v6_flush_kern_dcache_area
  323. .long v6_dma_map_area
  324. .long v6_dma_unmap_area
  325. .long v6_dma_flush_range
  326. .size v6_cache_fns, . - v6_cache_fns