/* cache-v6.S */
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8
/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush (clean + invalidate) the entire cache.
 *	No arguments; clobbers r0 only.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0				@ these CP15 ops take a zero operand
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr
/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/
/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 *
 *	With a VIPT cache, no per-address-space flush is needed here,
 *	so both entry points are no-ops and simply return.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr
/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Same contract as v6_coherent_kern_range (kernel entry point
 *	falls through to here).
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ align start down to a cache line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
#endif
	@ Invalidate the branch target buffer over the same line.  The four
	@ steps below advance r0 by 4 * BTB_FLUSH_SIZE = 32 bytes, which
	@ equals CACHE_LINE_SIZE, so each loop iteration covers exactly one
	@ cache line.
	mcr	p15, 0, r0, c7, c5, 7		@ invalidate BTB entry
	add	r0, r0, #BTB_FLUSH_SIZE
	mcr	p15, 0, r0, c7, c5, 7		@ invalidate BTB entry
	add	r0, r0, #BTB_FLUSH_SIZE
	mcr	p15, 0, r0, c7, c5, 7		@ invalidate BTB entry
	add	r0, r0, #BTB_FLUSH_SIZE
	mcr	p15, 0, r0, c7, c5, 7		@ invalidate BTB entry
	add	r0, r0, #BTB_FLUSH_SIZE
	cmp	r0, r1				@ unsigned compare: loop while r0 < end
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr
/*
 *	v6_flush_kern_dcache_page(kaddr)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question (clean + invalidate the D side).
 *
 *	- kaddr - kernel address (guaranteed to be page aligned)
 *
 *	PAGE_SZ is provided by an included header (proc-macros.S or
 *	below it) — not visible in this file.
 */
ENTRY(v6_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r1 = end of page (exclusive)
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr
/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 *
 *	Cache lines only partially covered by [start,end) are cleaned
 *	first (the conditional mcrne ops below), so that dirty data
 *	outside the region is not lost by the invalidate.
 */
ENTRY(v6_dma_inv_range)
	tst	r0, #D_CACHE_LINE_SIZE - 1	@ start mid-line?
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line (partial first line)
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1	@ end mid-line?
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1	@ align end down
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line (partial last line)
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	v6_dma_clean_range(start,end)
 *
 *	Clean (write back) the data cache over the specified region,
 *	line by line, then drain the write buffer so the data reaches
 *	memory.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 */
ENTRY(v6_dma_clean_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	v6_dma_flush_range(start,end)
 *
 *	Clean and invalidate the data cache over the specified region,
 *	line by line, then drain the write buffer.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
	__INITDATA

	@ Function-pointer table for the ARMv6 cache operations defined
	@ above.  The entry ORDER is the binary contract with the C-side
	@ consumer (presumably struct cpu_cache_fns — confirm against the
	@ kernel headers); do not reorder.  Init-data only: it is consumed
	@ during processor setup.
	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_page
	.long	v6_dma_inv_range
	.long	v6_dma_clean_range
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns