/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}

#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
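
/*
 * Index-type cache ops act on whatever line happens to sit at a given
 * cache index, so they can sweep an entire cache without touching the
 * TLB.  On CONFIG_MIPS_CMP systems this shortcut is disabled
 * (presumably because an indexed op only affects the cache of the core
 * executing it), so the DMA routines below fall back to address-based
 * hit ops there.
 */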

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
		*(volatile unsigned long *)CKSEG1; \
	if (R4600_V1_HIT_CACHEOP_WAR) \
		__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
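
/*
 * The r4k_blast_* hooks below are function pointers bound once, at
 * boot, by their *_setup() companions: the probed line size (16, 32,
 * 64 or 128 bytes) selects the matching blast_*() helper from
 * <asm/r4kcache.h>, and a line size of 0 (no such cache) selects
 * cache_noop.  The hot paths then make an indirect call with no
 * per-call size checks.
 */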

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
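
/*
 * TX49XX_ICACHE_INDEX_INV_WAR helpers: the sweep is split into two
 * passes so that the code never index-invalidates the 1 KB chunk of
 * I-cache it is currently executing from.  The CACHE32_UNROLL32_ALIGN*
 * macros place each loop at a 1 KB (32 lines * 32 bytes) boundary;
 * while running from an "even" chunk only the "odd" chunks are
 * invalidated, and vice versa for the second pass.
 */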

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
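
/*
 * __flush_cache_all() writes back and invalidates everything under our
 * control: both primaries are always blasted, and on CPUs with an
 * R4000-style secondary (the SC/MC R4000/R4400 variants and the
 * R10000 family) the S-cache is blasted as well.  Loongson 2 takes the
 * S-cache-only shortcut; its secondary is set up as inclusive of the
 * primaries in loongson2_sc_init() below.
 */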
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}
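
/*
 * An mm that has never run on a CPU holds no ASID there, so nothing of
 * it can be sitting in that CPU's caches.  On MT kernels several
 * virtual processors share one set of caches, so the mm counts as
 * potentially cached if it holds a context on any online CPU.
 */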
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
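
/*
 * flush_cache_range() only needs to act when the D-cache can alias or
 * when an executable mapping may have changed on a CPU whose I-cache
 * does not snoop D-cache fills; in all other cases the flush is
 * skipped entirely.
 */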
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}
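
/*
 * Per-page flush: the PTE for 'addr' is looked up to make sure the page
 * is actually present.  If the page cannot be reached through the
 * current ASID it is temporarily mapped with kmap_coherent() or
 * kmap_atomic() so the flush hits the right cache lines.  The D-cache
 * (plus the S-cache when the I-cache does not snoop remote stores) is
 * flushed for aliasing or executable pages; for executable pages the
 * I-cache is then either invalidated per page or, for VIVT I-caches,
 * handled by dropping the MMU context.
 */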
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1);
}
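
/*
 * flush_icache_range() makes freshly written instructions visible to
 * instruction fetch: unless the I-cache snoops D-cache fills, the
 * D-cache is first written back (whole-cache blast when the range
 * exceeds dcache_size, hit ops otherwise), and the I-cache is then
 * invalidated over the same range with the same size heuristic.
 */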
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT
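
/*
 * DMA cache maintenance hooks for non-coherent systems.  When the
 * secondary cache has the subset (inclusive) property, maintaining it
 * also takes care of the primaries; otherwise the D-cache is handled
 * directly, using a whole-cache blast when the buffer is at least as
 * large as the cache and indexed ops are safe, and address-range hit
 * ops otherwise.  Any board-level cache is dealt with last through
 * bc_wback_inv()/bc_inv().
 */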
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			unsigned long lsize = cpu_scache_line_size();
			unsigned long almask = ~(lsize - 1);

			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  Solved
			 * by aligning the address to cache line size.
			 */
			cache_op(Hit_Writeback_Inv_SD, addr & almask);
			cache_op(Hit_Writeback_Inv_SD,
				 (addr + size - 1) & almask);
			blast_inv_scache_range(addr, addr + size);
		}
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long almask = ~(lsize - 1);

		R4600_HIT_CACHEOP_WAR_IMPL;
		cache_op(Hit_Writeback_Inv_D, addr & almask);
		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}
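
/*
 * A full I-cache blast is only needed on virtually tagged I-caches;
 * a physically tagged I-cache cannot hold lines made stale by a change
 * of virtual mapping alone.
 */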
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
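
/*
 * probe_pcache() works out the primary cache geometry - sizes, line
 * sizes, ways and way bits - from the CP0 Config register on the
 * classic R-series parts, or from Config1 on MIPS32/MIPS64 class CPUs,
 * and sets the per-CPU option and workaround flags (indexed cacheop
 * support, VIVT I-cache, aliasing D-cache, ...) that the rest of this
 * file keys off.
 */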
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so they would normally suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
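/*
 * probe_scache() sizes an R4000-style secondary cache by exploiting
 * index wrap-around: valid tags are planted at power-of-two strides
 * across a 4 MB window, a zero (invalid) tag is stored at the base
 * index, and tags are then read back at successive power-of-two
 * offsets.  The first offset at which the zero tag reappears is where
 * the index wrapped, i.e. the S-cache size.  If the Config register
 * already says there is no S-cache the probe is skipped entirely.
 */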
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;		/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
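
/*
 * setup_scache() decides how the secondary cache is handled: the
 * R4000SC/R4400SC style parts are probed uncached with probe_scache(),
 * the R10000 family reads a fixed geometry out of the Config register,
 * and R5000/Nevada, RM7000/RM9000, Loongson 2 and MIPS32/MIPS64 class
 * CPUs defer to their dedicated S-cache support code.  Whatever is
 * found is reported and, where appropriate, marked as inclusive of the
 * primary caches.
 */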
static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init ()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * Au1100 errata actually keep silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);
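
/*
 * coherency_setup() programs the default cache coherency attribute
 * (CCA): either the value given on the command line via "cca=" or the
 * one the CPU came up with, written into the c0_config K0 field and
 * into _page_cachable_default for page table entries.  It then applies
 * the per-CPU quirks below (R4000-style CU handling, Alchemy
 * config.od, NXP PR4450 coherency fields).
 */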
static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
#endif
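
/*
 * r4k_cache_init() is the entry point for this file: it installs the
 * cache error handler, probes the primary and secondary caches, binds
 * all the r4k_blast_* function pointers, computes shm_align_mask for
 * aliasing D-caches, and wires the r4k implementations into the
 * generic MIPS cache-flush hooks (and, on non-coherent systems, the
 * DMA cache hooks) before doing an initial full flush and setting up
 * the coherency attributes.
 */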
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap = r4k__flush_cache_vmap;
	__flush_cache_vunmap = r4k__flush_cache_vunmap;

	flush_cache_all = cache_noop;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;
	local_flush_icache_range = local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv = (void *)cache_noop;
		_dma_cache_wback = (void *)cache_noop;
		_dma_cache_inv = (void *)cache_noop;
	} else {
		_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
		_dma_cache_wback = r4k_dma_cache_wback_inv;
		_dma_cache_inv = r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}