cache.S

/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 * __flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: x0-x7, x9-x11
 */
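/*
 * The routine below is the architected CLIDR/CCSIDR set/way walk. As a
 * rough, illustrative C sketch (not part of this file; names invented):
 *
 *	for (level = 0; level < LoC; level++) {
 *		if (clidr_ctype(level) < 2)	// no cache, or I-cache only
 *			continue;
 *		for (way = num_ways - 1; way >= 0; way--)
 *			for (set = num_sets - 1; set >= 0; set--)
 *				dc_cisw((way << way_shift) |
 *					(set << set_shift) |
 *					(level << 1));
 *	}
 */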
ENTRY(__flush_dcache_all)
        dsb     sy                      // ensure ordering with previous memory accesses
        mrs     x0, clidr_el1           // read clidr
        and     x3, x0, #0x7000000      // extract loc from clidr
        lsr     x3, x3, #23             // left align loc bit field
        cbz     x3, finished            // if loc is 0, then no need to clean
        mov     x10, #0                 // start clean at cache level 0
loop1:
        add     x2, x10, x10, lsr #1    // work out 3x current cache level
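        // x10 holds (level << 1), the CSSELR level field, so
        // x10 + (x10 >> 1) == 3 * level: each CLIDR Ctype field is
        // 3 bits wide, giving the shift for this level's type bits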
        lsr     x1, x0, x2              // extract cache type bits from clidr
        and     x1, x1, #7              // mask the bits for current cache only
        cmp     x1, #2                  // see what cache we have at this level
        b.lt    skip                    // skip if no cache, or just i-cache
        save_and_disable_irqs x9        // make CSSELR and CCSIDR access atomic
        msr     csselr_el1, x10         // select current cache level in csselr
        isb                             // isb to sync the new csselr & ccsidr
        mrs     x1, ccsidr_el1          // read the new ccsidr
        restore_irqs x9
        and     x2, x1, #7              // extract the length of the cache lines
        add     x2, x2, #4              // add 4 (line length offset)
        mov     x4, #0x3ff
        and     x4, x4, x1, lsr #3      // find maximum way number (associativity - 1)
        clz     w5, w4                  // find bit position of way size increment
        mov     x7, #0x7fff
        and     x7, x7, x1, lsr #13     // find maximum set number (number of sets - 1)
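        // CCSIDR_EL1 fields decoded above (format without FEAT_CCIDX):
        //   LineSize      [2:0]   = log2(line bytes) - 4
        //   Associativity [12:3]  = number of ways - 1
        //   NumSets       [27:13] = number of sets - 1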
loop2:
        mov     x9, x4                  // create working copy of max way size
loop3:
        lsl     x6, x9, x5
        orr     x11, x10, x6            // factor way and cache number into x11
        lsl     x6, x7, x2
        orr     x11, x11, x6            // factor index number into x11
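        // x11 now holds the architected set/way operand: level in
        // bits [3:1], way in bits [31:32-A], set in bits [B-1:L],
        // where L = log2(line bytes), A = log2(ways) and
        // B = L + log2(sets), both logs rounded up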
        dc      cisw, x11               // clean & invalidate by set/way
        subs    x9, x9, #1              // decrement the way
        b.ge    loop3
        subs    x7, x7, #1              // decrement the index
        b.ge    loop2
skip:
        add     x10, x10, #2            // increment cache number
        cmp     x3, x10
        b.gt    loop1
finished:
        mov     x10, #0                 // switch back to cache level 0
        msr     csselr_el1, x10         // select current cache level in csselr
        dsb     sy
        isb
        ret
ENDPROC(__flush_dcache_all)

/*
 * flush_cache_all()
 *
 * Flush the entire cache system. The data cache flush is now achieved
 * using atomic clean / invalidates working outwards from L1 cache. This
 * is done using Set/Way based cache maintenance instructions. The
 * instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
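/*
 * Set/way operations such as "dc cisw" affect only the caches of the
 * CPU that executes them and are not broadcast, whereas "ic ialluis"
 * is broadcast to the Inner Shareable domain. A caller that needs
 * every CPU's D-cache cleaned must therefore run this on each CPU.
 */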
ENTRY(flush_cache_all)
        mov     x12, lr
        bl      __flush_dcache_all
        mov     x0, #0
        ic      ialluis                 // I+BTB cache invalidate
        ret     x12
ENDPROC(flush_cache_all)

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
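/*
 * Illustrative caller (C sketch, not part of this file; names invented):
 *
 *	memcpy(dst, insns, len);		// write new code
 *	flush_icache_range((unsigned long)dst,
 *			   (unsigned long)dst + len);
 *	// dst is now safe to execute
 */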
ENTRY(flush_icache_range)
        /* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
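/*
 * USER() (from <asm/assembler.h>) attaches an exception-table entry to
 * the cache instruction it wraps: if the op faults on an unmapped user
 * address, execution resumes at label 9 below instead of oopsing.
 */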
ENTRY(__flush_cache_user_range)
        dcache_line_size x2, x3         // x2 = D-cache line size in bytes
        sub     x3, x2, #1              // line mask
        bic     x4, x0, x3              // round start down to line boundary
1:
USER(9f, dc     cvau, x4)               // clean D line to PoU
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    1b
        dsb     sy                      // complete cleans before I-side ops

        icache_line_size x2, x3         // x2 = I-cache line size in bytes
        sub     x3, x2, #1              // line mask
        bic     x4, x0, x3              // round start down to line boundary
1:
USER(9f, ic     ivau, x4)               // invalidate I line to PoU
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    1b
9:                                      // ignore any faulting cache operation
        dsb     sy
        isb
        ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that the data held in the region (kaddr, kaddr + size) is
 * written back to memory and invalidated from the caches.
 *
 * - kaddr - kernel start address of region
 * - size - size of region in bytes
 */
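/*
 * Illustrative caller (C sketch, not part of this file; names invented),
 * e.g. making a buffer safe for a non-coherent device to read and write:
 *
 *	__flush_dcache_area(buf, buf_len);
 */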
ENTRY(__flush_dcache_area)
        dcache_line_size x2, x3         // x2 = D-cache line size in bytes
        add     x1, x0, x1              // x1 = end of region
        sub     x3, x2, #1              // line mask
        bic     x0, x0, x3              // round start down to line boundary
1:      dc      civac, x0               // clean & invalidate D line / unified line
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__flush_dcache_area)