c-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
        preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, retry, wait);
#endif
        func(info);
        preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())           \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)

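/*
 * The blast helpers below are chosen once at boot, based on the probed
 * cache line sizes, and are then invoked through these function pointers
 * on the flush hot paths.
 */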
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)                            \
        __asm__ __volatile__(                           \
                "b\t1f\n\t"                             \
                ".align\t" #order "\n\t"                \
                "1:\n\t"                                \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
        r4k_blast_scache();
        return;
#endif
        r4k_blast_dcache();
        r4k_blast_icache();

        switch (current_cpu_type()) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                r4k_blast_scache();
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        int i;

        for_each_online_cpu(i)
                if (cpu_context(i, mm))
                        return 1;

        return 0;
#else
        return cpu_context(smp_processor_id(), mm);
#endif
}

static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!(has_valid_asid(vma->vm_mm)))
                return;

        r4k_blast_dcache();
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
         * only flush the primary caches, while R10000 and R12000 behave sanely.
         * R4000SC and R4400SC indexed S-cache ops also invalidate primary
         * caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the owning mm has no valid ASID yet, it cannot possibly have
         * gotten this page into the cache.
         */
        if (!has_valid_asid(mm))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                if (cpu_has_dc_aliases)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page, KM_USER0);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page(addr);
        }

        if (vaddr) {
                if (cpu_has_dc_aliases)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr, KM_USER0);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
                                1, 1);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else
                protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
        instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly
         */
        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_inv_scache_range(addr, addr + size);
                return;
        }

        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }

        bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla    $at,1f\n\t"
#endif
                        "cache  %0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (c->cputype) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit= __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

        case CPU_RM9000:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
                c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * So we seem to be a MIPS32 or MIPS64 CPU
                 * So let's probe the I-cache ...
                 */
                config1 = read_c0_config1();

                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)               /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping his hardware in the "bad"
         * configuration.
         */
        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
         * 2-way virtually indexed, so normally they'd suffer from aliases, but
         * magic in the hardware deals with that for us so we don't need to
         * take care ourselves.
         */
        switch (c->cputype) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (c->cputype) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_AU1000:
        case CPU_AU1500:
        case CPU_AU1100:
        case CPU_AU1550:
        case CPU_AU1200:
        case CPU_AU1210:
        case CPU_AU1250:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }

#ifdef CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has 4 way icache, but when using indexed cache op,
         * one op will act on all 4 ways
         */
        c->icache.ways = 1;
#endif

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               cpu_has_vtag_icache ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
               dcache_size >> 10, way_string[c->dcache.ways],
               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
                        "cache aliases" : "no aliases",
               c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
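/*
 * probe_scache() sizes the external S-cache by touching one line at each
 * power-of-two offset so every candidate index holds a valid tag, storing a
 * zero (invalid) tag at the base index, and then reading tags back at
 * increasing power-of-two offsets: the first offset whose tag reads back as
 * zero is the wrap-around point, i.e. the cache size.
 */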
static int __init probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;
        int tmp;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        tmp = 0;
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->dcache.waybit = 0;   /* does not matter */

        return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have a S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit= 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
        case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
#endif

        default:
                if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
                    c->isa_level == MIPS_CPU_ISA_M32R2 ||
                    c->isa_level == MIPS_CPU_ISA_M64R1 ||
                    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0) on the
         * early revisions of Alchemy SOCs.  It disables bus transaction
         * overlapping and needs to be set to fix various errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata are actually silent about this bit, so we set it
         * just in case for those revisions that require it to be set
         * according to arch/mips/au1000/common/cputable.c
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

static void __init coherency_setup(void)
{
        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit, and some wire it to zero; others, like Toshiba, had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;

        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one...
         */
        case CPU_AU1000: /* rev. DA, HA, HB */
        case CPU_AU1100: /* rev. AB, BA, BC ?? */
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
        }
}

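/*
 * Top-level cache initialization: install the cache error handler, probe the
 * primary and secondary cache geometry, select the blast helpers, hook up the
 * generic flush_* entry points, and finish with a full local flush before
 * tweaking the coherency configuration.
 */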
void __init r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;

        switch (c->cputype) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }

        probe_pcache();
        setup_scache();

        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
        if (c->dcache.linesz)
                shm_align_mask = max_t( unsigned long,
                                        c->dcache.sets * c->dcache.linesz - 1,
                                        PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE-1;

        flush_cache_all         = cache_noop;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
        flush_cache_page        = r4k_flush_cache_page;
        flush_cache_range       = r4k_flush_cache_range;

        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
        flush_icache_all        = r4k_flush_icache_all;
        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
        _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
        _dma_cache_wback        = r4k_dma_cache_wback_inv;
        _dma_cache_inv          = r4k_dma_cache_inv;
#endif

        build_clear_page();
        build_copy_page();
        local_r4k___flush_cache_all(NULL);
        coherency_setup();
}