/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
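
/*
 * Two families of enable/disable helpers follow: the *_msr variants use
 * the msrset/msrclr instructions (available when PVR2_USE_MSR_INSTR is
 * set in cpuinfo.use_instr), while the *_nomsr variants fall back to a
 * read-modify-write of rmsr through r12.
 */
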
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}
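
/*
 * Note: the *_nomsr helpers are not atomic - they read, modify and write
 * rmsr through r12 - so callers that need atomicity disable interrupts
 * around them (see the *_nomsr_irq functions below).
 */
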
/* Helper macro for computing the limits of cache range loops
 *
 * End address can be unaligned, which is OK for the C implementation.
 * The ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
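
/*
 * Example: with a 16-byte line, start 0x1003 is rounded down to 0x1000,
 * and end is clamped to start + cache_size, so a range loop never walks
 * more than one full cache worth of lines.
 */
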
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;		\
				bgtid	%0, 1b;			\
				addk	%0, %0, %1;		\
				" : : "r" (len), "r" (step)	\
				: "memory");			\
} while (0)
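
/*
 * The loop above counts down: 'op' is issued at offsets
 * cache_size - line_length, ..., line_length, 0. bgtid is a delayed
 * branch, so the addk decrement executes in its delay slot after the
 * exit condition has been tested.
 */
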
/* Used for wdc.flush/clear which can use rB for offset which is not possible
 * to use for simple wdc or wic.
 *
 * start address is cache aligned
 * end address is not aligned; if end is aligned then I have to subtract
 * cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not aligned, I align it because I will flush/invalidate the
 * whole line.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
				bgtid	%1, 1b;			\
				addk	%1, %1, %2;		\
				" : : "r" (start), "r" (count),	\
				"r" (step) : "memory");		\
} while (0)
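
/*
 * End adjustment example with a 16-byte line: an aligned end of 0x1010
 * becomes 0x1000, and an unaligned end of 0x1008 also becomes 0x1000 -
 * either way the line at 0x1000 is the last one operated on. wdc.flush
 * writes a dirty line back before invalidating it, while wdc.clear
 * invalidates the line without a writeback.
 */
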
/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	unsigned int volatile temp = 0;					\
	unsigned int align = ~(line_length - 1);			\
	end = ((end & align) == end) ? end - line_length : end & align;\
	WARN_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
				cmpu	%0, %1, %2;		\
				bgtid	%0, 1b;			\
				addk	%1, %1, %3;		\
				" : : "r" (temp), "r" (start), "r" (end),\
				"r" (line_length) : "memory");	\
} while (0)

#define ASM_LOOP
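
/*
 * With ASM_LOOP defined, the functions below use the assembly loop
 * macros above; undefining it falls back to the plain C loops in the
 * #else branches, which issue one wic/wdc per cache line.
 */
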
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

/* FIXME this is a blind invalidation, as expected,
 * but it can't be called on noMMU in microblaze_cache_init below
 *
 * MS: the noMMU kernel won't boot if a simple wdc is used
 * The reason should be that data the kernel still needs gets discarded
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

/* struct for wb caches and for wt caches */
struct scache *mbc;

/* new wb cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de, dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model which disables IRQs and turns the cache off */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New WT cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
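
/*
 * Summary of the selection done in microblaze_cache_init() below:
 *	MSR instructions + WB dcache -> wb_msr
 *	MSR instructions + WT dcache -> wt_msr_noirq (>= 7.20.a) or wt_msr
 *	no MSR instructions + WB     -> wb_nomsr
 *	no MSR instructions + WT     -> wt_nomsr_noirq (>= 7.20.a) or wt_nomsr
 */
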
/* CPU version codes - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n")

void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	/* FIXME Invalidation is done in U-BOOT
	 * WT cache: data is already written to main memory
	 * WB cache: discarding data here kept the noMMU kernel from booting
	 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}