/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif
/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  Three timing runs
 * were taken for each size, clean and dirty:
 *
 *	  Size	Clean (ticks)	Dirty (ticks)
 *	  4096	 21  20  21	 53  55  54
 *	  8192	 40  41  40	106 100 102
 *	 16384	 77  77  76	140 140 138
 *	 32768	150 149 150	214 216 212 <---
 *	 65536	296 297 296	351 358 361
 *	131072	591 591 591	656 657 651
 *	 Whole	132 136 132	221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
	.data
flush_base:
	.long	FLUSH_BASE
	.text
/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
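	/*
	 * The StrongARM has no "clean entire D cache" coprocessor
	 * operation, so the cache is cleaned by reading CACHE_DSIZE
	 * bytes of reserved, cacheable memory at FLUSH_BASE: each load
	 * allocates a clean line and displaces (writes back) whatever
	 * dirty line it replaces.  The EOR below ping-pongs flush_base
	 * between two adjacent windows so that successive flushes never
	 * read lines that are still resident from the previous pass.
	 */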
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE		@ switch to the other window
	str	r1, [r3, #0]			@ remember it for next time
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32			@ one load per cache line
	cmp	r1, r2
	blo	1b
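	/*
	 * Where the CPU has a minidata cache (512 bytes on the SA-1100),
	 * it is cleaned the same way, through its own reserved window.
	 */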
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1			@ convert size to end address
	/* fall through */
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wb_coherent_user_range)
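	/*
	 * The typical caller has just written instructions into the
	 * range.  The D cache lines are cleaned first so the code
	 * actually reaches memory, and only then is the I cache
	 * invalidated so subsequent fetches see the fresh instructions.
	 */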
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wb_dma_inv_range:
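	/*
	 * Any partial line at either end of the range is cleaned first,
	 * so that invalidating it cannot discard neighbouring data that
	 * happens to share a cache line with the buffer.
	 */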
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
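	/*
	 * Dispatch on the enum dma_data_direction values
	 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
	 * one unsigned compare against DMA_TO_DEVICE selects clean only
	 * (eq), invalidate only (hs, i.e. DMA_FROM_DEVICE) or, on
	 * fall-through, clean + invalidate for DMA_BIDIRECTIONAL.
	 */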
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range		@ device reads: write back only
	bcs	v4wb_dma_inv_range		@ device writes: discard stale lines
	b	v4wb_dma_flush_range		@ bidirectional: clean + invalidate
ENDPROC(v4wb_dma_map_area)
/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
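	/*
	 * Nothing to do at unmap time on this core: the CPU does not
	 * speculatively refill the cache, so the maintenance done in
	 * dma_map_area() still holds when the transfer completes.
	 */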
	mov	pc, lr
ENDPROC(v4wb_dma_unmap_area)
	__INITDATA

	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_area
	.long	v4wb_dma_map_area
	.long	v4wb_dma_unmap_area
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns
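/*
 * The entries above must stay in the order expected by struct
 * cpu_cache_fns in arch/arm/include/asm/cacheflush.h, through which
 * the generic code calls this table when MULTI_CACHE is in use.  A
 * sketch of the matching layout from the same kernel era (check the
 * header in the tree you are building against):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long,
 *					 unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_map_area)(const void *, size_t, int);
 *		void (*dma_unmap_area)(const void *, size_t, int);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */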