# arch/mn10300/mm/cache-inv-by-reg.S — scraped listing (8.4 KB); the
# "  N. " prefixes below are listing line numbers from the extraction,
# not part of the original source.
  1. /* MN10300 CPU cache invalidation routines, using automatic purge registers
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/sys.h>
  12. #include <linux/linkage.h>
  13. #include <asm/smp.h>
  14. #include <asm/page.h>
  15. #include <asm/cache.h>
  16. #include <asm/irqflags.h>
  17. #include <asm/cacheflush.h>
  18. #define mn10300_local_dcache_inv_range_intr_interval \
  19. +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
  20. #if mn10300_local_dcache_inv_range_intr_interval > 0xff
  21. #error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
  22. #endif
  23. .am33_2
  24. #ifndef CONFIG_SMP
  25. .globl mn10300_icache_inv
  26. .globl mn10300_icache_inv_page
  27. .globl mn10300_icache_inv_range
  28. .globl mn10300_icache_inv_range2
  29. .globl mn10300_dcache_inv
  30. .globl mn10300_dcache_inv_page
  31. .globl mn10300_dcache_inv_range
  32. .globl mn10300_dcache_inv_range2
  33. mn10300_icache_inv = mn10300_local_icache_inv
  34. mn10300_icache_inv_page = mn10300_local_icache_inv_page
  35. mn10300_icache_inv_range = mn10300_local_icache_inv_range
  36. mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
  37. mn10300_dcache_inv = mn10300_local_dcache_inv
  38. mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
  39. mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
  40. mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
  41. #endif /* !CONFIG_SMP */
  42. ###############################################################################
  43. #
  44. # void mn10300_local_icache_inv(void)
  45. # Invalidate the entire icache
  46. #
  47. ###############################################################################
  48. ALIGN
  49. .globl mn10300_local_icache_inv
  50. .type mn10300_local_icache_inv,@function
  51. mn10300_local_icache_inv:
  52. mov CHCTR,a0
  53. movhu (a0),d0
  54. btst CHCTR_ICEN,d0
  55. beq mn10300_local_icache_inv_end
  56. # invalidate
  57. or CHCTR_ICINV,d0
  58. movhu d0,(a0)
  59. movhu (a0),d0
  60. mn10300_local_icache_inv_end:
  61. ret [],0
  62. .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
  63. ###############################################################################
  64. #
  65. # void mn10300_local_dcache_inv(void)
  66. # Invalidate the entire dcache
  67. #
  68. ###############################################################################
  69. ALIGN
  70. .globl mn10300_local_dcache_inv
  71. .type mn10300_local_dcache_inv,@function
  72. mn10300_local_dcache_inv:
  73. mov CHCTR,a0
  74. movhu (a0),d0
  75. btst CHCTR_DCEN,d0
  76. beq mn10300_local_dcache_inv_end
  77. # invalidate
  78. or CHCTR_DCINV,d0
  79. movhu d0,(a0)
  80. movhu (a0),d0
  81. mn10300_local_dcache_inv_end:
  82. ret [],0
  83. .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
  84. ###############################################################################
  85. #
  86. # void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
  87. # void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
  88. # void mn10300_local_dcache_inv_page(unsigned long start)
  89. # Invalidate a range of addresses on a page in the dcache
  90. #
  91. ###############################################################################
  92. ALIGN
  93. .globl mn10300_local_dcache_inv_page
  94. .globl mn10300_local_dcache_inv_range
  95. .globl mn10300_local_dcache_inv_range2
  96. .type mn10300_local_dcache_inv_page,@function
  97. .type mn10300_local_dcache_inv_range,@function
  98. .type mn10300_local_dcache_inv_range2,@function
  99. mn10300_local_dcache_inv_page:
  100. and ~(PAGE_SIZE-1),d0
  101. mov PAGE_SIZE,d1
  102. mn10300_local_dcache_inv_range2:
  103. add d0,d1
  104. mn10300_local_dcache_inv_range:
  105. # If we are in writeback mode we check the start and end alignments,
  106. # and if they're not cacheline-aligned, we must flush any bits outside
  107. # the range that share cachelines with stuff inside the range
  108. #ifdef CONFIG_MN10300_CACHE_WBACK
  109. btst ~(L1_CACHE_BYTES-1),d0
  110. bne 1f
  111. btst ~(L1_CACHE_BYTES-1),d1
  112. beq 2f
  113. 1:
  114. bra mn10300_local_dcache_flush_inv_range
  115. 2:
  116. #endif /* CONFIG_MN10300_CACHE_WBACK */
  117. movm [d2,d3,a2],(sp)
  118. mov CHCTR,a0
  119. movhu (a0),d2
  120. btst CHCTR_DCEN,d2
  121. beq mn10300_local_dcache_inv_range_end
  122. # round the addresses out to be full cachelines, unless we're in
  123. # writeback mode, in which case we would be in flush and invalidate by
  124. # now
  125. #ifndef CONFIG_MN10300_CACHE_WBACK
  126. and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
  127. # addr down
  128. mov L1_CACHE_BYTES-1,d2
  129. add d2,d1
  130. and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
  131. #endif /* !CONFIG_MN10300_CACHE_WBACK */
  132. sub d0,d1,d2 # calculate the total size
  133. mov d0,a2 # A2 = start address
  134. mov d1,a1 # A1 = end address
  135. LOCAL_CLI_SAVE(d3)
  136. mov DCPGCR,a0 # make sure the purger isn't busy
  137. setlb
  138. mov (a0),d0
  139. btst DCPGCR_DCPGBSY,d0
  140. lne
  141. # skip initial address alignment calculation if address is zero
  142. mov d2,d1
  143. cmp 0,a2
  144. beq 1f
  145. dcivloop:
  146. /* calculate alignsize
  147. *
  148. * alignsize = L1_CACHE_BYTES;
  149. * while (! start & alignsize) {
  150. * alignsize <<=1;
  151. * }
  152. * d1 = alignsize;
  153. */
  154. mov L1_CACHE_BYTES,d1
  155. lsr 1,d1
  156. setlb
  157. add d1,d1
  158. mov d1,d0
  159. and a2,d0
  160. leq
  161. 1:
  162. /* calculate invsize
  163. *
  164. * if (totalsize > alignsize) {
  165. * invsize = alignsize;
  166. * } else {
  167. * invsize = totalsize;
  168. * tmp = 0x80000000;
  169. * while (! invsize & tmp) {
  170. * tmp >>= 1;
  171. * }
  172. * invsize = tmp;
  173. * }
  174. * d1 = invsize
  175. */
  176. cmp d2,d1
  177. bns 2f
  178. mov d2,d1
  179. mov 0x80000000,d0 # start from 31bit=1
  180. setlb
  181. lsr 1,d0
  182. mov d0,e0
  183. and d1,e0
  184. leq
  185. mov d0,d1
  186. 2:
  187. /* set mask
  188. *
  189. * mask = ~(invsize-1);
  190. * DCPGMR = mask;
  191. */
  192. mov d1,d0
  193. add -1,d0
  194. not d0
  195. mov d0,(DCPGMR)
  196. # invalidate area
  197. mov a2,d0
  198. or DCPGCR_DCI,d0
  199. mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
  200. setlb # wait for the purge to complete
  201. mov (a0),d0
  202. btst DCPGCR_DCPGBSY,d0
  203. lne
  204. sub d1,d2 # decrease size remaining
  205. add d1,a2 # increase next start address
  206. /* check invalidating of end address
  207. *
  208. * a2 = a2 + invsize
  209. * if (a2 < end) {
  210. * goto dcivloop;
  211. * } */
  212. cmp a1,a2
  213. bns dcivloop
  214. LOCAL_IRQ_RESTORE(d3)
  215. mn10300_local_dcache_inv_range_end:
  216. ret [d2,d3,a2],12
  217. .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
  218. .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
  219. .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
  220. ###############################################################################
  221. #
  222. # void mn10300_local_icache_inv_page(unsigned long start)
  223. # void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
  224. # void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
  225. # Invalidate a range of addresses on a page in the icache
  226. #
  227. ###############################################################################
  228. ALIGN
  229. .globl mn10300_local_icache_inv_page
  230. .globl mn10300_local_icache_inv_range
  231. .globl mn10300_local_icache_inv_range2
  232. .type mn10300_local_icache_inv_page,@function
  233. .type mn10300_local_icache_inv_range,@function
  234. .type mn10300_local_icache_inv_range2,@function
  235. mn10300_local_icache_inv_page:
  236. and ~(PAGE_SIZE-1),d0
  237. mov PAGE_SIZE,d1
  238. mn10300_local_icache_inv_range2:
  239. add d0,d1
  240. mn10300_local_icache_inv_range:
  241. movm [d2,d3,a2],(sp)
  242. mov CHCTR,a0
  243. movhu (a0),d2
  244. btst CHCTR_ICEN,d2
  245. beq mn10300_local_icache_inv_range_reg_end
  246. /* calculate alignsize
  247. *
  248. * alignsize = L1_CACHE_BYTES;
  249. * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
  250. * alignsize <<= 1;
  251. * }
  252. * d2 = alignsize;
  253. */
  254. mov L1_CACHE_BYTES,d2
  255. sub d0,d1,d3
  256. add -1,d3
  257. lsr L1_CACHE_SHIFT,d3
  258. beq 2f
  259. 1:
  260. add d2,d2
  261. lsr 1,d3
  262. bne 1b
  263. 2:
  264. /* a1 = end */
  265. mov d1,a1
  266. LOCAL_CLI_SAVE(d3)
  267. mov ICIVCR,a0
  268. /* wait for busy bit of area invalidation */
  269. setlb
  270. mov (a0),d1
  271. btst ICIVCR_ICIVBSY,d1
  272. lne
  273. /* set mask
  274. *
  275. * mask = ~(alignsize-1);
  276. * ICIVMR = mask;
  277. */
  278. mov d2,d1
  279. add -1,d1
  280. not d1
  281. mov d1,(ICIVMR)
  282. /* a2 = mask & start */
  283. and d1,d0,a2
  284. icivloop:
  285. /* area invalidate
  286. *
  287. * ICIVCR = (mask & start) | ICIVCR_ICI
  288. */
  289. mov a2,d0
  290. or ICIVCR_ICI,d0
  291. mov d0,(a0)
  292. /* wait for busy bit of area invalidation */
  293. setlb
  294. mov (a0),d1
  295. btst ICIVCR_ICIVBSY,d1
  296. lne
  297. /* check invalidating of end address
  298. *
  299. * a2 = a2 + alignsize
  300. * if (a2 < end) {
  301. * goto icivloop;
  302. * } */
  303. add d2,a2
  304. cmp a1,a2
  305. bns icivloop
  306. LOCAL_IRQ_RESTORE(d3)
  307. mn10300_local_icache_inv_range_reg_end:
  308. ret [d2,d3,a2],12
  309. .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
  310. .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
  311. .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2