  1. /*
  2. * Cache control for MicroBlaze cache memories
  3. *
  4. * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2007-2009 PetaLogix
  6. * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
  7. *
  8. * This file is subject to the terms and conditions of the GNU General
  9. * Public License. See the file COPYING in the main directory of this
  10. * archive for more details.
  11. */
  12. #include <asm/cacheflush.h>
  13. #include <linux/cache.h>
  14. #include <asm/cpuinfo.h>
  15. #include <asm/pvr.h>
/*
 * Single cache-line primitives.  "wic"/"wdc" are the MicroBlaze
 * write-to-instruction-cache / write-to-data-cache instructions; the
 * two register operands are added to form the target address (r0
 * always reads as 0).
 */

/* Invalidate the icache line containing @addr. */
static inline void __invalidate_flush_icache(unsigned int addr)
{
	__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (addr));
}

/* Write back (flush) the dcache line containing @addr - wdc.flush
 * form, for write-back caches. */
static inline void __flush_dcache(unsigned int addr)
{
	__asm__ __volatile__ ("wdc.flush %0, r0;" \
				: : "r" (addr));
}

/* Invalidate, without write back, the dcache line at
 * @baseaddr + @offset (wdc.clear form). */
static inline void __invalidate_dcache(unsigned int baseaddr,
					unsigned int offset)
{
	__asm__ __volatile__ ("wdc.clear %0, %1;" \
				: : "r" (baseaddr), "r" (offset));
}
/*
 * Cache on/off helpers for CPUs that implement the msrset/msrclr
 * instructions (PVR2_USE_MSR_INSTR).  The trailing nop is a
 * pipeline-delay precaution so the new MSR value is in effect before
 * the next instruction; the "memory" clobber keeps the compiler from
 * moving memory accesses across the cache state change.
 */

/* Set MSR_ICE: turn the instruction cache on. */
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;	\
				nop; "	\
			: : "i" (MSR_ICE) : "memory");
}

/* Clear MSR_ICE: turn the instruction cache off. */
static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;	\
				nop; "	\
			: : "i" (MSR_ICE) : "memory");
}

/* Set MSR_DCE: turn the data cache on. */
static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory");
}

/* Clear MSR_DCE: turn the data cache off. */
static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory");
}
/*
 * Cache on/off helpers for CPUs without msrset/msrclr: read rmsr,
 * set or clear the cache-enable bit, write it back.  r12 is used as
 * a scratch register and is therefore in the clobber list.
 */

/* Enable icache: OR MSR_ICE into rmsr. */
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
}

/* Disable icache: AND rmsr with the complement of MSR_ICE (the
 * assembler computes ~constant for the andi immediate). */
static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;	\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
}

/* Enable dcache: OR MSR_DCE into rmsr. */
static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory", "r12");
}

/* Disable dcache: clear MSR_DCE in rmsr. */
static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;	\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory", "r12");
}
  104. /* Helper macro for computing the limits of cache range loops */
  105. #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
  106. do { \
  107. int align = ~(cache_line_length - 1); \
  108. end = min(start + cache_size, end); \
  109. start &= align; \
  110. end = ((end & align) + cache_line_length); \
  111. } while (0);
  112. /*
  113. * Helper macro to loop over the specified cache_size/line_length and
  114. * execute 'op' on that cacheline
  115. */
  116. #define CACHE_ALL_LOOP(cache_size, line_length, op) \
  117. do { \
  118. unsigned int len = cache_size; \
  119. int step = -line_length; \
  120. BUG_ON(step >= 0); \
  121. \
  122. __asm__ __volatile__ (" 1: " #op " %0, r0; \
  123. bgtid %0, 1b; \
  124. addk %0, %0, %1; \
  125. " : : "r" (len), "r" (step) \
  126. : "memory"); \
  127. } while (0);
  128. #define CACHE_ALL_LOOP2(cache_size, line_length, op) \
  129. do { \
  130. unsigned int len = cache_size; \
  131. int step = -line_length; \
  132. BUG_ON(step >= 0); \
  133. \
  134. __asm__ __volatile__ (" 1: " #op " r0, %0; \
  135. bgtid %0, 1b; \
  136. addk %0, %0, %1; \
  137. " : : "r" (len), "r" (step) \
  138. : "memory"); \
  139. } while (0);
  140. /* for wdc.flush/clear */
  141. #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
  142. do { \
  143. int step = -line_length; \
  144. int count = end - start; \
  145. BUG_ON(count <= 0); \
  146. \
  147. __asm__ __volatile__ (" 1: " #op " %0, %1; \
  148. bgtid %1, 1b; \
  149. addk %1, %1, %2; \
  150. " : : "r" (start), "r" (count), \
  151. "r" (step) : "memory"); \
  152. } while (0);
  153. /* It is used only first parameter for OP - for wic, wdc */
  154. #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
  155. do { \
  156. int step = -line_length; \
  157. int count = end - start; \
  158. BUG_ON(count <= 0); \
  159. \
  160. __asm__ __volatile__ (" 1: addk %0, %0, %1; \
  161. " #op " %0, r0; \
  162. bgtid %1, 1b; \
  163. addk %1, %1, %2; \
  164. " : : "r" (start), "r" (count), \
  165. "r" (step) : "memory"); \
  166. } while (0);
/*
 * Flush an icache address range on CPUs with msrset/msrclr: the range
 * is clamped and line-aligned by CACHE_LOOP_LIMITS, then every line
 * is invalidated with "wic" while the icache is turned off and local
 * interrupts are disabled.
 */
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}

/*
 * As above, but for CPUs without msrset/msrclr: the icache is turned
 * off/on via the rmsr read-modify-write helpers instead.
 */
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

/*
 * Flush an icache range with the cache left enabled and interrupts
 * untouched - used by the write-back and newer "noirq" write-through
 * cache models (see the scache tables below).
 */
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
}
/*
 * Flush the whole icache with "wic" while the icache is off (msrclr)
 * and local interrupts are disabled.
 */
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}

/* As above, for CPUs without msrset/msrclr. */
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

/* Flush the whole icache with the cache enabled and irqs untouched. */
static void __flush_icache_all_noirq(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
}
/*
 * Invalidate the whole dcache with "wdc" while the dcache is off
 * (msrclr) and local interrupts are disabled - old write-through
 * model (see the wt_msr table below).
 */
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}

/* As above, for CPUs without msrset/msrclr. */
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
  248. static void __invalidate_dcache_all_noirq_wt(void)
  249. {
  250. pr_debug("%s\n", __func__);
  251. CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
  252. }
  253. /* FIXME this is weird - should be only wdc but not work
  254. * MS: I am getting bus errors and other weird things */
  255. static void __invalidate_dcache_all_wb(void)
  256. {
  257. pr_debug("%s\n", __func__);
  258. CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
  259. wdc.clear)
  260. #if 0
  261. unsigned int i;
  262. pr_debug("%s\n", __func__);
  263. /* Just loop through cache size and invalidate it */
  264. for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
  265. __invalidate_dcache(0, i);
  266. #endif
  267. }
/*
 * Invalidate a dcache range on a write-back cache: wdc.clear drops
 * each line without writing dirty data back.
 */
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
}

/*
 * Invalidate a dcache range on a write-through cache with plain
 * "wdc", cache left enabled and irqs untouched (newer WT cores).
 */
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
}
/*
 * Invalidate a dcache range (write-through cache) with the dcache
 * turned off via msrclr and local interrupts disabled - old WT model.
 */
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}

/* As above, for CPUs without msrset/msrclr. */
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
/*
 * Write back the whole write-back dcache with wdc.flush (dirty lines
 * are written to memory).
 */
static void __flush_dcache_all_wb(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
}

/* Write back a dcache address range with wdc.flush. */
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
}
/*
 * Active cache-operation table for both wb and wt caches; set once at
 * boot by microblaze_cache_init() to one of the six models below.
 *
 * Field naming (taken from the initializers): the i/d prefix selects
 * icache/dcache; e = enable, d = disable, fl = flush all,
 * flr = flush range, in = invalidate all, inr = invalidate range.
 * The icache has no separate invalidate - "wic" both flushes and
 * invalidates - so iin/iinr reuse the flush functions everywhere.
 */
struct scache *mbc;

/* New wb cache model: dcache flushed with wdc.flush, invalidated with
 * wdc.clear; all ops run with caches enabled and irqs untouched. */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* There is only difference in ie, id, de, dd functions (the rmsr
 * read-modify-write variants for CPUs without msrset/msrclr). */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model with disabling irq and turn off cache.  Note the
 * dcache flush and invalidate entries map to the same routines: on a
 * write-through cache there is no dirty data, so invalidation is
 * sufficient for both. */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

/* Old wt model for CPUs without msrset/msrclr. */
const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer Microblaze versions: cache ops run
 * with the caches enabled and without disabling interrupts. */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* New wt model for CPUs without msrset/msrclr. */
const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
  418. /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
  419. #define CPUVER_7_20_A 0x0c
  420. #define CPUVER_7_20_D 0x0f
  421. #define INFO(s) printk(KERN_INFO "cache: " s " \n");
  422. void microblaze_cache_init(void)
  423. {
  424. if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
  425. if (cpuinfo.dcache_wb) {
  426. INFO("wb_msr");
  427. mbc = (struct scache *)&wb_msr;
  428. if (cpuinfo.ver_code < CPUVER_7_20_D) {
  429. /* MS: problem with signal handling - hw bug */
  430. INFO("WB won't work properly");
  431. }
  432. } else {
  433. if (cpuinfo.ver_code >= CPUVER_7_20_A) {
  434. INFO("wt_msr_noirq");
  435. mbc = (struct scache *)&wt_msr_noirq;
  436. } else {
  437. INFO("wt_msr");
  438. mbc = (struct scache *)&wt_msr;
  439. }
  440. }
  441. } else {
  442. if (cpuinfo.dcache_wb) {
  443. INFO("wb_nomsr");
  444. mbc = (struct scache *)&wb_nomsr;
  445. if (cpuinfo.ver_code < CPUVER_7_20_D) {
  446. /* MS: problem with signal handling - hw bug */
  447. INFO("WB won't work properly");
  448. }
  449. } else {
  450. if (cpuinfo.ver_code >= CPUVER_7_20_A) {
  451. INFO("wt_nomsr_noirq");
  452. mbc = (struct scache *)&wt_nomsr_noirq;
  453. } else {
  454. INFO("wt_nomsr");
  455. mbc = (struct scache *)&wt_nomsr;
  456. }
  457. }
  458. }
  459. }