c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 * o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}
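
/*
 * A minimal usage sketch (hypothetical caller): the cache maintenance
 * routines below wrap their per-CPU work in a local_* helper taking a
 * void * argument and hand it to r4k_on_each_cpu(), e.g.
 *
 *	static void local_flush_something(void *unused)
 *	{
 *		r4k_blast_dcache();
 *	}
 *
 *	r4k_on_each_cpu(local_flush_something, NULL);
 *
 * Note the helper also runs on the calling CPU, via the direct
 * func(info) call above.
 */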
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
		*(volatile unsigned long *)CKSEG1; \
	if (R4600_V1_HIT_CACHEOP_WAR) \
		__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
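
/*
 * The workaround above, in brief: on R4600 V2.x a dummy uncached load
 * (the read through CKSEG1) is issued before Hit cache ops so that any
 * in-flight memory transaction completes first, while R4600 V1.x simply
 * gets a few pipeline-draining nops.  The gating macros come from
 * <asm/war.h>.
 */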
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
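
/*
 * Why the alignment games: the TX49 erratum (TX49XX_ICACHE_INDEX_INV_WAR
 * in <asm/war.h>) makes it unsafe to index-invalidate the icache chunk
 * holding the code that performs the invalidation.  The tx49_blast_*
 * routines below therefore run in two passes: the branch-over-.align
 * trick above places the executing loop in an "even" or "odd" 1 KB chunk
 * while the other set of chunks is being invalidated.
 */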
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
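
/*
 * A note on the *_setup() pattern above: the line size (and hence the
 * correct blast_* template instantiation) is a fixed hardware property,
 * so it is probed once at boot and latched into a function pointer
 * rather than re-tested on every flush.
 */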
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
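
/*
 * Note on has_valid_asid(): on MT (SMP/SMTC) kernels an mm may hold a
 * live context on any online CPU, not just the local one, so every
 * CPU's context has to be checked before a flush may be skipped.
 */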
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
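
/*
 * The instruction_hazard() above is the barrier that keeps the pipeline
 * from executing stale instruction words fetched before the icache
 * lines were invalidated.
 */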
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  Solved
			 * by aligning the address to the cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
	__sync();
}
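
/*
 * The split between the two helpers mirrors the DMA API: wback_inv is
 * what mapping a buffer for DMA_TO_DEVICE/DMA_BIDIRECTIONAL needs
 * (dirty lines must reach memory before the device reads it), while
 * invalidate alone suffices for DMA_FROM_DEVICE, where the CPU's cached
 * copy is simply stale and can be discarded.
 */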
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
struct flush_kernel_vmap_range_args {
	unsigned long vaddr;
	int size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
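
/*
 * Reading rm7k_erratum31() above: with TagLo/TagHi zeroed, each
 * Index_Store_Tag_I writes an invalid tag into one line in each of the
 * four 4 KB ways (the 0x1000 offsets), the Fill op then forces a clean
 * refill, and the final store-tag pass invalidates again, leaving the
 * whole 16 KB icache in a known state before first use.
 */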
static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
		c->dcache.flags |= MIPS_CACHE_VTAG;
	if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
	if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
	    ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
		c->dcache.flags |= MIPS_CACHE_VTAG;
		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
	}
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:		/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache first ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so they would normally
	 * suffer from aliases, but magic in the hardware deals with that
	 * for us so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (c->cputype == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
		/* fall through */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using the indexed cache op,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
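
/*
 * How the sizing loop above finds the S-cache size, by example: suppose
 * the S-cache is a direct-mapped 512 KB.  The fill pass loads from begin,
 * begin+64K, begin+128K, ... so those indices hold valid tags; then the
 * index of begin is given a zero tag.  Index_Load_Tag_SD at begin+128K,
 * begin+256K, ... reads back nonzero tags until begin+512K, which wraps
 * to the same index as begin and returns the zero tag, so
 * addr - begin = 512 KB is the cache size.
 */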
#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
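
/*
 * Usage: the cache coherency attribute can be forced from the kernel
 * command line, e.g. booting with "cca=3" (cachable, noncoherent) makes
 * coherency_setup() below use CCA 3 as the default page cache attribute
 * instead of whatever c0_config currently reports.
 */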
static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit, and some wire it to zero; others, like Toshiba, had
	 * the silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
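	/*
	 * Worked example for the mask above (hypothetical numbers): a 32 KB
	 * 4-way dcache with 32-byte lines has 256 sets, so sets * linesz =
	 * 8 KB, the span of one way.  With 4 KB pages, two pages can then
	 * alias within a way, and shm_align_mask = 8K - 1 forces shared
	 * mappings onto addresses that are congruent modulo 8 KB.
	 */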
	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}