c-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
        preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, 1);
#endif
        func(info);
        preempt_enable();
}
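
/*
 * With CONFIG_MIPS_CMP an index-type cache op only acts on the local
 * core's cache, while another core may hold the line; presumably for
 * that reason the index-based "blast the whole cache" shortcuts are
 * disabled on CMP below and the ranged hit-type operations are used
 * instead.
 */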
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)
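
/*
 * R4600 silicon needs special care around hit-type cache ops.  On
 * V2.x parts the uncached load from CKSEG1 below is issued first,
 * apparently so the cache op does not collide with an in-flight
 * memory access; on V1.x parts a few nops are used instead.  See
 * R4600_V[12]_HIT_CACHEOP_WAR in <asm/war.h> for the details.
 */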
#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache64_page(addr);
}
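
/*
 * The *_setup() routines below run once from r4k_cache_init().  Each
 * binds its function pointer to the blast variant matching the probed
 * cache line size, so the hot paths need no per-call size checks; a
 * line size of 0 means the cache is absent and the no-op handler is
 * installed instead.
 */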
static void __cpuinit r4k_blast_dcache_page_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
        else if (dc_lsize == 64)
                r4k_blast_dcache = blast_dcache64;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)                            \
        __asm__ __volatile__(                           \
                "b\t1f\n\t"                             \
                ".align\t" #order "\n\t"                \
                "1:\n\t"                                \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
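
/*
 * The TX49 workaround below must never invalidate the I-cache lines
 * it is currently executing from.  The JUMP_TO_ALIGN macros place the
 * unrolled loops in known 1KB-aligned chunks, so the code can safely
 * invalidate the odd-numbered chunks while running in an even chunk
 * and vice versa.
 */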
static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}
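
/*
 * Note that unlike the I- and D-cache setups above, the S-cache
 * setups below key off scache_size rather than the line size alone,
 * so they rely on setup_scache() having probed the secondary cache
 * first; r4k_cache_init() calls them in that order.
 */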
static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
        r4k_blast_scache();
        return;
#endif
        r4k_blast_dcache();
        r4k_blast_icache();

        switch (current_cpu_type()) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                r4k_blast_scache();
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
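
/*
 * An mm that holds no live ASID on any CPU has never been scheduled
 * there and so cannot have pulled any of its pages into the caches;
 * the flush routines below use this to bail out early.
 */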
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        int i;

        for_each_online_cpu(i)
                if (cpu_context(i, mm))
                        return 1;

        return 0;
#else
        return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
        r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
        r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!(has_valid_asid(vma->vm_mm)))
                return;

        r4k_blast_dcache();
        if (exec)
                r4k_blast_icache();
}
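
/*
 * A range flush is only needed if the D-cache can alias, or if the
 * range is executable and the I-cache does not fill from the D-cache;
 * otherwise the physically tagged caches stay consistent on their own
 * and the flush can be skipped entirely.
 */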
static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
         * we only flush the primary caches, while R10000 and R12000 behave
         * sanely ...  R4000SC and R4400SC indexed S-cache ops also invalidate
         * primary caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        int map_coherent = 0;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                map_coherent = (cpu_has_dc_aliases &&
                                page_mapped(page) && !Page_dcache_dirty(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page(addr);
        }

        if (vaddr) {
                if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}
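
/*
 * smp_call_function() may not be used from atomic context, so when
 * this is called with preemption or interrupts disabled only the
 * local CPU's D-cache gets flushed.
 */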
static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};
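
/*
 * The D-cache writeback below is done before the I-cache invalidate
 * so that freshly written instructions have reached memory by the
 * time the I-cache refills; on CPUs whose I-cache fills straight
 * from the D-cache that first step is skipped.
 */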
static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else
                protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

        local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
        instruction_hazard();
}
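
/*
 * On DMA-noncoherent platforms the two helpers below back the
 * _dma_cache_* hooks installed in r4k_cache_init(): wback_inv writes
 * dirty lines back and invalidates them (for buffers the device will
 * read), while the pure invalidate variant is presumably sufficient
 * for buffers the device only writes to.
 */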
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                __sync();
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly
         */
        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_wback_inv(addr, size);
        __sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else {
                        /*
                         * There is no clearly documented alignment requirement
                         * for the cache instruction on MIPS processors and
                         * some processors, among them the RM5200 and RM7000
                         * QED processors will throw an address error for cache
                         * hit ops with insufficient alignment.  Solved by
                         * aligning the address to cache line size.
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
                __sync();
                return;
        }

        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }

        bc_inv(addr, size);
        __sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla    $at,1f\n\t"
#endif
                        "cache  %0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
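
/*
 * A virtually tagged I-cache may hold stale entries for a virtual
 * address whose mapping has changed, so it has to be blasted
 * wholesale here; physically tagged I-caches are already covered by
 * the page and range flushes above and need nothing extra.
 */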
static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
        unsigned long   vaddr;
        int             size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
        struct flush_kernel_vmap_range_args *vmra = args;
        unsigned long vaddr = vmra->vaddr;
        int size = vmra->size;

        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        if (cpu_has_safe_index_cacheops && size >= dcache_size)
                r4k_blast_dcache();
        else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(vaddr, vaddr + size);
        }
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        struct flush_kernel_vmap_range_args args;

        args.vaddr = (unsigned long) vaddr;
        args.size = size;

        r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
        /*
         * Early versions of the 74K do not update the cache tags on a
         * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
         * aliases.  In this case it is better to treat the cache as always
         * having aliases.
         */
        if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
                c->dcache.flags |= MIPS_CACHE_VTAG;
        if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
                write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
        if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
            ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
                c->dcache.flags |= MIPS_CACHE_VTAG;
                write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
        }
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (c->cputype) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit= __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
                 * the I-cache ...
                 */
                config1 = read_c0_config1();

                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)               /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
                c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping his hardware in the "bad"
         * configuration.
         */
        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
         * 2-way virtually indexed so normally they'd suffer from aliases, but
         * magic in the hardware deals with that for us so we don't need to
         * take care ourselves.
         */
        switch (c->cputype) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_XLR:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_M14KC:
        case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
                if (c->cputype == CPU_74K)
                        alias_74k_erratum(c);
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (c->cputype) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_ALCHEMY:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }

#ifdef CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has 4 way icache, but when using indexed cache op,
         * one op will act on all 4 ways
         */
        c->icache.ways = 1;
#endif

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
               dcache_size >> 10, way_string[c->dcache.ways],
               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
                        "cache aliases" : "no aliases",
               c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->scache.waybit = 0;           /* does not matter */

        return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have a S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit= 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
#endif
        case CPU_XLP:
                /* don't need to worry about L2, fully coherent */
                return;

        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
                                    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init()) {
                                scache_size = c->scache.ways * c->scache.sets *
                                              c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0) on the
         * early revisions of Alchemy SOCs.  It disables the bus
         * transaction overlapping and needs to be set to fix various
         * errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata are actually silent about this bit, so we set
         * it just in case for those revisions that require it to be set
         * according to the (now gone) cpu table.
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()                                                   \
        __asm__ __volatile__(                                           \
                ".set noreorder\n\t"                                    \
                "nop; nop; nop; nop; nop; nop;\n\t"                     \
                ".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
        unsigned long config0;

        config0 = read_c0_config();

        /* clear all three cache coherency fields */
        config0 &= ~(0x7 | (7 << 25) | (7 << 28));
        config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
                    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
                    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
        write_c0_config(config0);
        NXP_BARRIER();
}
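
/*
 * "cca=<0..7>" on the kernel command line overrides the cache
 * coherency attribute used for cacheable pages and for KSEG0
 * (applied in coherency_setup() below); out-of-range values fall
 * back to whatever the bootloader left in c0_config.
 */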
static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
        get_option(&str, &cca);

        return 0;
}

early_param("cca", cca_setup);

static void __cpuinit coherency_setup(void)
{
        if (cca < 0 || cca > 7)
                cca = read_c0_config() & CONF_CM_CMASK;
        _page_cachable_default = cca << _CACHE_SHIFT;

        pr_debug("Using cache attribute %d\n", cca);
        change_c0_config(CONF_CM_CMASK, cca);

        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit; some wire it to zero, others like Toshiba had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;

        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one on:
         * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
         */
        case CPU_ALCHEMY:
                au1x00_fixup_config_od();
                break;

        case PRID_IMP_PR4450:
                nxp_pr4450_fixup_config();
                break;
        }
}

static void __cpuinit r4k_cache_error_setup(void)
{
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;

        switch (c->cputype) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }
}

void __cpuinit r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        struct cpuinfo_mips *c = &current_cpu_data;

        probe_pcache();
        setup_scache();

        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
        if (c->dcache.linesz)
                shm_align_mask = max_t(unsigned long,
                                       c->dcache.sets * c->dcache.linesz - 1,
                                       PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE-1;

        __flush_cache_vmap     = r4k__flush_cache_vmap;
        __flush_cache_vunmap   = r4k__flush_cache_vunmap;

        flush_cache_all        = cache_noop;
        __flush_cache_all      = r4k___flush_cache_all;
        flush_cache_mm         = r4k_flush_cache_mm;
        flush_cache_page       = r4k_flush_cache_page;
        flush_cache_range      = r4k_flush_cache_range;

        __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

        flush_cache_sigtramp   = r4k_flush_cache_sigtramp;
        flush_icache_all       = r4k_flush_icache_all;
        local_flush_data_cache_page = local_r4k_flush_data_cache_page;
        flush_data_cache_page  = r4k_flush_data_cache_page;
        flush_icache_range     = r4k_flush_icache_range;
        local_flush_icache_range = local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
        if (coherentio) {
                _dma_cache_wback_inv   = (void *)cache_noop;
                _dma_cache_wback       = (void *)cache_noop;
                _dma_cache_inv         = (void *)cache_noop;
        } else {
                _dma_cache_wback_inv   = r4k_dma_cache_wback_inv;
                _dma_cache_wback       = r4k_dma_cache_wback_inv;
                _dma_cache_inv         = r4k_dma_cache_inv;
        }
#endif

        build_clear_page();
        build_copy_page();

        /*
         * We want to run CMP kernels on cores with and without coherent
         * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
         * or not to flush caches.
         */
        local_r4k___flush_cache_all(NULL);

        coherency_setup();
        board_cache_error_setup = r4k_cache_error_setup;
}