/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
static inline void __enable_icache_msr(void)
{
        __asm__ __volatile__ ("msrset  r0, %0;         \
                                nop; "                 \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
        __asm__ __volatile__ ("msrclr  r0, %0;         \
                                nop; "                 \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
        __asm__ __volatile__ ("msrset  r0, %0;         \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_DCE)                \
                        : "memory");
}

static inline void __disable_dcache_msr(void)
{
        __asm__ __volatile__ ("msrclr  r0, %0;         \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_DCE)                \
                        : "memory");
}
static inline void __enable_icache_nomsr(void)
{
        __asm__ __volatile__ ("mfs     r12, rmsr;      \
                                nop;                   \
                                ori    r12, r12, %0;   \
                                mts    rmsr, r12;      \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_ICE)                \
                        : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
        __asm__ __volatile__ ("mfs     r12, rmsr;      \
                                nop;                   \
                                andi   r12, r12, ~%0;  \
                                mts    rmsr, r12;      \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_ICE)                \
                        : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
        __asm__ __volatile__ ("mfs     r12, rmsr;      \
                                nop;                   \
                                ori    r12, r12, %0;   \
                                mts    rmsr, r12;      \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_DCE)                \
                        : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
        __asm__ __volatile__ ("mfs     r12, rmsr;      \
                                nop;                   \
                                andi   r12, r12, ~%0;  \
                                mts    rmsr, r12;      \
                                nop; "                 \
                        :                              \
                        : "i" (MSR_DCE)                \
                        : "memory", "r12");
}
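
/*
 * Note on the two families above: cores that advertise the msrset/msrclr
 * instructions (PVR2_USE_MSR_INSTR, tested in microblaze_cache_init below)
 * can set or clear MSR bits in a single instruction, so the *_msr helpers
 * use them; older cores fall back to the *_nomsr read-modify-write sequence
 * through rmsr, which is why r12 appears in the clobber list there.
 */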

/* Helper macro for computing the limits of cache range loops
 *
 * The end address can be unaligned, which is fine for the C implementation;
 * the ASM implementations align it themselves in the macros below.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
do {                                                                   \
        int align = ~(cache_line_length - 1);                          \
        end = min(start + cache_size, end);                            \
        start &= align;                                                \
} while (0)
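
/*
 * Worked example (illustrative numbers, not taken from a real cpuinfo):
 * with cache_line_length = 16 and cache_size = 8192, a request for
 * start = 0x1234, end = 0x5678 becomes start = 0x1230 (line aligned) and
 * end = min(0x1234 + 0x2000, 0x5678) = 0x3234, since there is no point in
 * walking a range larger than the cache itself.
 */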

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on each cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)                    \
do {                                                                   \
        unsigned int len = cache_size - line_length;                   \
        int step = -line_length;                                       \
        WARN_ON(step >= 0);                                            \
                                                                       \
        __asm__ __volatile__ (" 1:      " #op " %0, r0;                \
                                        bgtid   %0, 1b;                \
                                        addk    %0, %0, %1;            \
                                        " : : "r" (len), "r" (step)    \
                                        : "memory");                   \
} while (0)
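
/*
 * The loop above counts down: 'len' starts at the offset of the last line
 * and bgtid keeps branching while it is positive, with the addk executing
 * in the branch delay slot, so the final iteration touches offset 0. For
 * an (illustrative) 8 KB cache with 16-byte lines the sequence is
 * 0x1ff0, 0x1fe0, ..., 0x0000.
 */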

/* Used for wdc.flush/clear, which can take an offset in rB; that is not
 * possible with plain wdc or wic.
 *
 * The start address is cache-line aligned. The end address is not; if end
 * is already aligned, one cacheline length has to be subtracted because the
 * next cacheline must not be flushed/invalidated, and if it is not aligned,
 * it is aligned down because the whole line will be flushed/invalidated
 * anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                \
do {                                                                   \
        int step = -line_length;                                       \
        int align = ~(line_length - 1);                                \
        int count;                                                     \
        end = ((end & align) == end) ? end - line_length : end & align; \
        count = end - start;                                           \
        WARN_ON(count < 0);                                            \
                                                                       \
        __asm__ __volatile__ (" 1:      " #op " %0, %1;                \
                                        bgtid   %1, 1b;                \
                                        addk    %1, %1, %2;            \
                                        " : : "r" (start), "r" (count), \
                                        "r" (step) : "memory");        \
} while (0)
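
/*
 * Worked example for the 'end' adjustment above, with line_length = 16
 * (illustrative): end = 0x1240 is already aligned, so the last line
 * processed is 0x1230 (end - 16); end = 0x1248 is not, so it is aligned
 * down to 0x1240. Either way the loop covers up to, but never past, the
 * last byte requested.
 */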

/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                \
do {                                                                   \
        int volatile temp = 0;                                         \
        int align = ~(line_length - 1);                                \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end < start);                                          \
                                                                       \
        __asm__ __volatile__ (" 1:      " #op " %1, r0;                \
                                        cmpu    %0, %1, %2;            \
                                        bgtid   %0, 1b;                \
                                        addk    %1, %1, %3;            \
                                        " : : "r" (temp), "r" (start), "r" (end), \
                                        "r" (line_length) : "memory"); \
} while (0)
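
/*
 * Here cmpu compares the current address against 'end' into temp on every
 * pass and bgtid loops while the address is still below 'end', so unlike
 * the two macros above this walk is ascending, one line per iteration.
 */

/* Use the hand-written ASM loops above; commenting this out falls back to
 * the plain C loops kept under #ifndef ASM_LOOP in the functions below.
 */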
#define ASM_LOOP

static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
}

/* FIXME This is blind invalidation as expected,
 * but it can't be called on noMMU in microblaze_cache_init below
 *
 * MS: the noMMU kernel won't boot if a simple wdc is used
 * The reason seems to be that data the kernel still needs gets discarded
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
                                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.clear        %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
                                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
                                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
                                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                wdc.flush);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;"        \
                                : : "r" (i));
#endif
}
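
/*
 * Field naming in struct scache, as used below: the leading 'i'/'d' selects
 * icache/dcache, then e = enable, d = disable, fl/flr = flush all/range,
 * in/inr = invalidate all/range (for the icache, flush and invalidate map
 * to the same helpers).
 */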

/* struct for wb caches and for wt caches */
struct scache *mbc;

/* new wb cache model */
const struct scache wb_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de, dd functions */
const struct scache wb_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model which disables IRQs and turns the cache off */
const struct scache wt_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_msr_irq,
        .iflr = __flush_icache_range_msr_irq,
        .iin = __flush_icache_all_msr_irq,
        .iinr = __flush_icache_range_msr_irq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_msr_irq,
        .dflr = __invalidate_dcache_range_msr_irq_wt,
        .din = __invalidate_dcache_all_msr_irq,
        .dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_nomsr_irq,
        .iflr = __flush_icache_range_nomsr_irq,
        .iin = __flush_icache_all_nomsr_irq,
        .iinr = __flush_icache_range_nomsr_irq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_nomsr_irq,
        .dflr = __invalidate_dcache_range_nomsr_irq,
        .din = __invalidate_dcache_all_nomsr_irq,
        .dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New WT cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};
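
/*
 * Selection summary for microblaze_cache_init below:
 *
 *   MSR instr  dcache_wb  ver_code     scache used
 *   yes        yes        any          wb_msr   (warns below 7.20.d)
 *   yes        no         >= 7.20.a    wt_msr_noirq
 *   yes        no         <  7.20.a    wt_msr
 *   no         yes        any          wb_nomsr (warns below 7.20.d)
 *   no         no         >= 7.20.a    wt_nomsr_noirq
 *   no         no         <  7.20.a    wt_nomsr
 */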

/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A   0x0c
#define CPUVER_7_20_D   0x0f

#define INFO(s) printk(KERN_INFO "cache: " s "\n")

void microblaze_cache_init(void)
{
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_msr");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code < CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_msr_noirq");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
                                INFO("wt_msr");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_nomsr");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code < CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_nomsr_noirq");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
                                INFO("wt_nomsr");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }

        /* FIXME Invalidation is done in U-BOOT
         * WT cache: data has already been written to main memory
         * WB cache: discarding the data on noMMU is what kept the kernel
         * from booting, see __invalidate_dcache_all_wb above
         */
        /* invalidate_dcache(); */
        enable_dcache();

        invalidate_icache();
        enable_icache();
}