/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
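
/*
 * The r4k_blast_* routines below are selected once, at boot, based on the
 * probed cache line size and any per-CPU workarounds, and are then invoked
 * through function pointers on the hot flush paths.
 */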
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
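
/*
 * The TX49 blast routines below split the icache into even and odd 1kB
 * chunks so that the invalidation loop never invalidates the cache lines
 * holding its own code: the loop is aligned to a chunk boundary and each
 * pass only blasts the opposite chunk.
 */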
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
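
/*
 * An mm can only have cache footprint on a CPU if it has been assigned an
 * ASID there, so on SMP/SMTC kernels we check every online CPU while on UP
 * the current CPU's context is all that matters.
 */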
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, whereas R10000 and R12000 behave
	 * sanely ...  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
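
/*
 * Flush one page's worth of cache on behalf of a VMA: walk the page tables
 * to make sure the page is actually present, map it through a coherent or
 * atomic kmap if it belongs to a foreign ASID, then blast the D-cache (and,
 * for executable mappings, the I-cache) for that page.
 */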
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		if (cpu_has_dc_aliases)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (cpu_has_dc_aliases)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
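
/*
 * For ranges that cover (nearly) the whole cache it is cheaper to blast
 * the cache by index than to walk the range with hit-type cacheops, so the
 * flushes below compare the range length against the probed cache sizes.
 */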
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
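
/*
 * DMA cache maintenance for non-coherent systems: writeback-and-invalidate
 * before a device accesses memory the CPU may have dirtied, and invalidate
 * before the CPU reads data a device has written.  Where the primary caches
 * are inclusive subsets of the S-cache, operating on the S-cache alone is
 * sufficient.
 */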
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
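
/*
 * Decode the primary cache geometry (size, line size, associativity) from
 * c0_config, or from c0_config1 on MIPS32/MIPS64 CPUs, applying per-CPU
 * quirks and workarounds along the way.
 */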
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU.
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz= lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so normally they'd suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using indexed cache ops,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
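
/*
 * Pick an S-cache strategy: probe it directly on the R4000SC/R4400SC class
 * CPUs, read its geometry from c0_config on R10000 class CPUs, and hand
 * everything else off to the CPU-specific init routines declared above.
 */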
static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init ()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata documentation is actually silent about this bit,
	 * so we set it just in case for those revisions that require it to
	 * be set according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static void __init coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}
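
/*
 * Boot-time entry point: install the cache error handler, probe the
 * primary and secondary caches, bind the blast routines to the probed
 * geometry, wire up the generic flush hooks, and finish with a full
 * flush plus coherency setup.
 */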
void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE-1;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}