c-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
	int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}
/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
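/*
 * Work around the R4600 Hit-cacheop errata before issuing Hit-type cache
 * operations: V2.x parts get a dummy uncached load from CKSEG1, V1.x parts
 * get a short run of nops (see R4600_HIT_CACHEOP_WAR_IMPL below).
 */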
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
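/*
 * The r4k_blast_* handlers below are plain function pointers.  Each
 * *_setup() routine picks the matching blast_* implementation once, at
 * cache initialisation time, based on the probed cache line size.
 */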
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)				\
	__asm__ __volatile__(				\
		"b\t1f\n\t"				\
		".align\t" #order "\n\t"		\
		"1:\n\t"				\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10)	/* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
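/*
 * The TX49 variants below index-invalidate the I-cache in two passes: the
 * CACHE32_UNROLL32_ALIGN* macros align the loop code itself to a 1 kB
 * boundary, so while executing from an "even" 1 kB chunk only the "odd"
 * chunks are invalidated, and vice versa.  That way the unrolled loop
 * never invalidates the cache lines it is currently running from.
 */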
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
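/*
 * Unless the I-cache fills straight from the D-cache
 * (cpu_has_ic_fills_f_dc), the range is first written back from the
 * D-cache (and from the S-cache when the I-cache does not snoop remote
 * stores), so that the I-cache invalidation below refetches up-to-date
 * instructions from memory.
 */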
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);

	instruction_hazard();
}
/*
 * Ok, this seriously sucks.  We use these functions to flush a user page
 * but don't know the virtual address, so we have to blast away the whole
 * icache which is significantly more expensive than the real thing.  On
 * the other hand we at least know the kernel address of the page so we
 * can flush it selectively.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating
	 * the secondary cache will result in any entries in the primary
	 * caches also getting invalidated, which hopefully is a bit more
	 * economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
#ifdef CONFIG_DMA_NONCOHERENT
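/*
 * Cache maintenance for machines without cache-coherent DMA: write back
 * and/or invalidate the caches over the given address range before a
 * buffer is handed to (or after it is received from) a DMA engine.
 * These back the _dma_cache_* hooks installed in r4k_cache_init() below.
 */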
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so they would normally suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       c->dcache.linesz);
}
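/*
 * probe_scache() sizes an R4000SC/R4400SC style external cache by tag
 * inspection: it first touches kernel text at power-of-two offsets to
 * plant valid tags, then writes an all-zero tag at the base index and
 * walks upwards with Index_Load_Tag_SD until that zero tag is read back;
 * the offset at which the index wraps around is the secondary cache size.
 */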
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways],
				       c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways],
	       c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the early
	 * revisions of Alchemy SOCs.  It disables the bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}
void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;
	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}