
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64
/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
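
/*
 * Note: with the constants above, the D-cache holds
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT is the
 * full D-cache size: any range at least as large as the
 * cache itself takes the whole-cache path.
 */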
/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr
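
/*
 * CP15 c7 maintenance encodings used in this file (ARMv4):
 * opcode2 = 0 operates on the whole cache and ignores the
 * register value (zero by convention); opcode2 = 1 operates
 * on the single line containing the virtual address in Rd.
 *	c7, c5	- instruction cache
 *	c7, c6	- data cache
 */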
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
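
/*
 * At the CACHE_DLIMIT crossover the loop above would issue
 * 16384 / 32 = 512 D-line invalidates (and up to 512 more
 * I-line invalidates when VM_EXEC is set), versus just two
 * whole-cache operations on the __flush_whole_cache path;
 * the "needs benchmarking" note above refers to tuning
 * this threshold.
 */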
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */
/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
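
/*
 * Only the I-cache is touched here: a write-through D-cache
 * is always coherent with memory, so newly written code is
 * already visible in RAM and only stale I-cache lines need
 * invalidating.
 */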
/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(v4wt_flush_kern_dcache_page)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, #PAGE_SZ		@ end address for fallthrough
	/* fallthrough */
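
/*
 * The fallthrough leaves r0 = page start and r1 = page end,
 * which are exactly the (start, end) arguments dma_inv_range
 * expects; the whole-I-cache invalidate above handles the
 * I/D aliasing case for executable pages.
 */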
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_dma_inv_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	/* FALLTHROUGH */
/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	A write-through cache never holds dirty lines, so there
 *	is never anything to clean: this is a no-op.
 */
ENTRY(v4wt_dma_clean_range)
	mov	pc, lr
/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
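
/*
 * Flush is clean + invalidate; since clean is a no-op on a
 * write-through cache, flush degenerates to invalidate and
 * the symbol is simply aliased to v4wt_dma_inv_range.
 */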
	__INITDATA

	.type	v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
	.long	v4wt_flush_kern_cache_all
	.long	v4wt_flush_user_cache_all
	.long	v4wt_flush_user_cache_range
	.long	v4wt_coherent_kern_range
	.long	v4wt_coherent_user_range
	.long	v4wt_flush_kern_dcache_page
	.long	v4wt_dma_inv_range
	.long	v4wt_dma_clean_range
	.long	v4wt_dma_flush_range
	.size	v4wt_cache_fns, . - v4wt_cache_fns
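
/*
 * The entries above must stay in the same order as the
 * function pointers in struct cpu_cache_fns, through which
 * the core code dispatches to this implementation.  A rough
 * sketch of that struct for kernels of this vintage (see
 * include/asm-arm/cacheflush.h for the authoritative layout):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long start,
 *					 unsigned long end,
 *					 unsigned int flags);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *page);
 *		void (*dma_inv_range)(unsigned long, unsigned long);
 *		void (*dma_clean_range)(unsigned long, unsigned long);
 *		void (*dma_flush_range)(unsigned long, unsigned long);
 *	};
 */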