/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

#ifdef CONFIG_ARM_ERRATA_411920
/*
 * Invalidate the entire I cache (this code is a workaround for the ARM1136
 * erratum 411920 - Invalidate Instruction Cache operation can fail. This
 * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
 *
 * Registers:
 *   r0 - set to 0
 *   r1 - corrupted
 */
ENTRY(v6_icache_inval_all)
	mov	r0, #0
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
	mov	pc, lr
#endif

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

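/*
 * Usage note: these two coherency entry points are normally reached
 * through the kernel's cache function table rather than called
 * directly.  A minimal caller sketch, assuming the generic
 * flush_icache_range() wrapper of this era ends up in
 * coherent_kern_range():
 *
 *	memcpy((void *)addr, new_insns, len);	// code just written
 *	flush_icache_range(addr, addr + len);	// clean D, invalidate I/BTB
 *	// it is now safe to execute from addr
 */
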
/*
 *	v6_flush_kern_dcache_page(kaddr)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- kaddr	- kernel address (guaranteed to be page aligned)
 */
ENTRY(v6_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr

/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v6_dma_inv_range)
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v6_dma_clean_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

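/*
 * Usage note: a minimal sketch of how the three DMA range operations
 * above are typically selected, assuming the usual dmac_*_range()
 * wrappers and streaming-DMA direction handling of this era:
 *
 *	switch (dir) {
 *	case DMA_FROM_DEVICE:			// device writes to memory
 *		dmac_inv_range(start, end);	break;
 *	case DMA_TO_DEVICE:			// device reads from memory
 *		dmac_clean_range(start, end);	break;
 *	case DMA_BIDIRECTIONAL:
 *		dmac_flush_range(start, end);	break;
 *	}
 */
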
	__INITDATA

	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_page
	.long	v6_dma_inv_range
	.long	v6_dma_clean_range
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns
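
/*
 * The order of the entries in v6_cache_fns must match the field order
 * of struct cpu_cache_fns in asm/cacheflush.h (used when MULTI_CACHE
 * is selected).  A sketch of the expected layout, assuming the
 * definition used by kernels of this era (exact argument types have
 * varied between versions):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */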