
/*
 * linux/arch/arm/mm/cache-v4wt.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ARMv4 write through cache operations support.
 *
 * We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
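
/*
 * Note: with the constants above, the data cache holds
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT is currently
 * set to the full cache size.
 */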

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
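
/*
 * For illustration only, the policy above corresponds roughly to
 * the following C (hypothetical helper names, not kernel API):
 *
 *	if (end - start >= CACHE_DLIMIT) {
 *		flush_whole_cache();
 *	} else {
 *		for (; start < end; start += CACHE_DLINESIZE) {
 *			invalidate_dcache_line(start);
 *			if (flags & VM_EXEC)
 *				invalidate_icache_line(start);
 *		}
 *	}
 */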

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
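
/*
 * Since this is a write-through cache, memory is always up to
 * date with respect to the D cache, so coherency here only
 * requires invalidating stale I cache lines; no D cache clean
 * is needed.
 */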

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1			@ r0..r1 now spans the area
	/* fall through into v4wt_dma_inv_range */

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_dma_inv_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	/* FALLTHROUGH */

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_dma_clean_range)
	mov	pc, lr
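
/*
 * A write-through cache never holds dirty lines, so there is
 * nothing to clean: dma_clean_range is a no-op.  For the same
 * reason, dma_flush_range below reduces to a pure invalidate
 * and simply aliases v4wt_dma_inv_range.
 */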

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

	__INITDATA

	.type	v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
	.long	v4wt_flush_kern_cache_all
	.long	v4wt_flush_user_cache_all
	.long	v4wt_flush_user_cache_range
	.long	v4wt_coherent_kern_range
	.long	v4wt_coherent_user_range
	.long	v4wt_flush_kern_dcache_area
	.long	v4wt_dma_inv_range
	.long	v4wt_dma_clean_range
	.long	v4wt_dma_flush_range
	.size	v4wt_cache_fns, . - v4wt_cache_fns
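
/*
 * The entries above must appear in the same order as the members
 * of struct cpu_cache_fns (see <asm/cacheflush.h>).  As a rough
 * sketch of that layout (consult the header for the authoritative
 * definition):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long start,
 *					 unsigned long end,
 *					 unsigned int flags);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */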