@ cache-v4wb.S (scraped-listing header and line-number run removed)
  1. /*
  2. * linux/arch/arm/mm/cache-v4wb.S
  3. *
  4. * Copyright (C) 1997-2002 Russell king
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/config.h>
  11. #include <linux/linkage.h>
  12. #include <linux/init.h>
  13. #include <asm/memory.h>
  14. #include <asm/page.h>
  15. #include "proc-macros.S"
  16. /*
  17. * The size of one data cache line.
  18. */
  19. #define CACHE_DLINESIZE 32
  20. /*
  21. * The total size of the data cache.
  22. */
  23. #if defined(CONFIG_CPU_SA110)
  24. # define CACHE_DSIZE 16384
  25. #elif defined(CONFIG_CPU_SA1100)
  26. # define CACHE_DSIZE 8192
  27. #else
  28. # error Unknown cache size
  29. #endif
  30. /*
  31. * This is the size at which it becomes more efficient to
  32. * clean the whole cache, rather than using the individual
  33. * cache line maintainence instructions.
  34. *
  35. * Size Clean (ticks) Dirty (ticks)
  36. * 4096 21 20 21 53 55 54
  37. * 8192 40 41 40 106 100 102
  38. * 16384 77 77 76 140 140 138
  39. * 32768 150 149 150 214 216 212 <---
  40. * 65536 296 297 296 351 358 361
  41. * 131072 591 591 591 656 657 651
  42. * Whole 132 136 132 221 217 207 <---
  43. */
  44. #define CACHE_DLIMIT (CACHE_DSIZE * 4)
  45. .data
  46. flush_base:
  47. .long FLUSH_BASE
  48. .text
  49. /*
  50. * flush_user_cache_all()
  51. *
  52. * Clean and invalidate all cache entries in a particular address
  53. * space.
  54. */
  55. ENTRY(v4wb_flush_user_cache_all)
  56. /* FALLTHROUGH */
  57. /*
  58. * flush_kern_cache_all()
  59. *
  60. * Clean and invalidate the entire cache.
  61. */
  62. ENTRY(v4wb_flush_kern_cache_all)
  63. mov ip, #0
  64. mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
  65. __flush_whole_cache:
  66. ldr r3, =flush_base
  67. ldr r1, [r3, #0]
  68. eor r1, r1, #CACHE_DSIZE
  69. str r1, [r3, #0]
  70. add r2, r1, #CACHE_DSIZE
  71. 1: ldr r3, [r1], #32
  72. cmp r1, r2
  73. blo 1b
  74. #ifdef FLUSH_BASE_MINICACHE
  75. add r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
  76. sub r1, r2, #512 @ only 512 bytes
  77. 1: ldr r3, [r1], #32
  78. cmp r1, r2
  79. blo 1b
  80. #endif
  81. mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
  82. mov pc, lr
  83. /*
  84. * flush_user_cache_range(start, end, flags)
  85. *
  86. * Invalidate a range of cache entries in the specified
  87. * address space.
  88. *
  89. * - start - start address (inclusive, page aligned)
  90. * - end - end address (exclusive, page aligned)
  91. * - flags - vma_area_struct flags describing address space
  92. */
  93. ENTRY(v4wb_flush_user_cache_range)
  94. mov ip, #0
  95. sub r3, r1, r0 @ calculate total size
  96. tst r2, #VM_EXEC @ executable region?
  97. mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
  98. cmp r3, #CACHE_DLIMIT @ total size >= limit?
  99. bhs __flush_whole_cache @ flush whole D cache
  100. 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
  101. mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
  102. add r0, r0, #CACHE_DLINESIZE
  103. cmp r0, r1
  104. blo 1b
  105. tst r2, #VM_EXEC
  106. mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
  107. mov pc, lr
  108. /*
  109. * flush_kern_dcache_page(void *page)
  110. *
  111. * Ensure no D cache aliasing occurs, either with itself or
  112. * the I cache
  113. *
  114. * - addr - page aligned address
  115. */
  116. ENTRY(v4wb_flush_kern_dcache_page)
  117. add r1, r0, #PAGE_SZ
  118. /* fall through */
  119. /*
  120. * coherent_kern_range(start, end)
  121. *
  122. * Ensure coherency between the Icache and the Dcache in the
  123. * region described by start. If you have non-snooping
  124. * Harvard caches, you need to implement this function.
  125. *
  126. * - start - virtual start address
  127. * - end - virtual end address
  128. */
  129. ENTRY(v4wb_coherent_kern_range)
  130. /* fall through */
  131. /*
  132. * coherent_user_range(start, end)
  133. *
  134. * Ensure coherency between the Icache and the Dcache in the
  135. * region described by start. If you have non-snooping
  136. * Harvard caches, you need to implement this function.
  137. *
  138. * - start - virtual start address
  139. * - end - virtual end address
  140. */
  141. ENTRY(v4wb_coherent_user_range)
  142. bic r0, r0, #CACHE_DLINESIZE - 1
  143. 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
  144. mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
  145. add r0, r0, #CACHE_DLINESIZE
  146. cmp r0, r1
  147. blo 1b
  148. mov ip, #0
  149. mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
  150. mcr p15, 0, ip, c7, c10, 4 @ drain WB
  151. mov pc, lr
  152. /*
  153. * dma_inv_range(start, end)
  154. *
  155. * Invalidate (discard) the specified virtual address range.
  156. * May not write back any entries. If 'start' or 'end'
  157. * are not cache line aligned, those lines must be written
  158. * back.
  159. *
  160. * - start - virtual start address
  161. * - end - virtual end address
  162. */
  163. ENTRY(v4wb_dma_inv_range)
  164. tst r0, #CACHE_DLINESIZE - 1
  165. bic r0, r0, #CACHE_DLINESIZE - 1
  166. mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
  167. tst r1, #CACHE_DLINESIZE - 1
  168. mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
  169. 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
  170. add r0, r0, #CACHE_DLINESIZE
  171. cmp r0, r1
  172. blo 1b
  173. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  174. mov pc, lr
  175. /*
  176. * dma_clean_range(start, end)
  177. *
  178. * Clean (write back) the specified virtual address range.
  179. *
  180. * - start - virtual start address
  181. * - end - virtual end address
  182. */
  183. ENTRY(v4wb_dma_clean_range)
  184. bic r0, r0, #CACHE_DLINESIZE - 1
  185. 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
  186. add r0, r0, #CACHE_DLINESIZE
  187. cmp r0, r1
  188. blo 1b
  189. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  190. mov pc, lr
  191. /*
  192. * dma_flush_range(start, end)
  193. *
  194. * Clean and invalidate the specified virtual address range.
  195. *
  196. * - start - virtual start address
  197. * - end - virtual end address
  198. *
  199. * This is actually the same as v4wb_coherent_kern_range()
  200. */
  201. .globl v4wb_dma_flush_range
  202. .set v4wb_dma_flush_range, v4wb_coherent_kern_range
  203. __INITDATA
  204. .type v4wb_cache_fns, #object
  205. ENTRY(v4wb_cache_fns)
  206. .long v4wb_flush_kern_cache_all
  207. .long v4wb_flush_user_cache_all
  208. .long v4wb_flush_user_cache_range
  209. .long v4wb_coherent_kern_range
  210. .long v4wb_coherent_user_range
  211. .long v4wb_flush_kern_dcache_page
  212. .long v4wb_dma_inv_range
  213. .long v4wb_dma_clean_range
  214. .long v4wb_dma_flush_range
  215. .size v4wb_cache_fns, . - v4wb_cache_fns