c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h>	/* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
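
/*
 * Roughly what the workaround above does (my reading of the R4600 errata,
 * so treat the details as an assumption and check asm/war.h for the
 * authoritative story): V2.x parts want the write buffer drained before a
 * hit-type cache op, which the uncached CKSEG1 load forces, while V1.x
 * parts just need a few cycles of padding, hence the four nops.
 */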

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
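
/*
 * Why the alignment (my reading of the TX49 workaround, so take it as an
 * assumption): cache32_unroll32() index-invalidates 32 lines of 32 bytes,
 * i.e. one 1 kB chunk of the I-cache per call.  By branching to a 1 kB
 * (or 2 kB) aligned label first, the TX49 functions below know which
 * 0x400-sized chunk holds their own instructions, so they blast the
 * "other" chunks first and only hit their own chunk after jumping out
 * of it.
 */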

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches, but R10000 and R12000 behave sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got the
	 * choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating the
	 * secondary cache will result in any entries in the primary caches
	 * also getting invalidated, which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */
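
/*
 * Rough intent of the two DMA helpers above (my reading of the generic
 * MIPS DMA glue, so treat it as an assumption): _dma_cache_wback_inv is
 * used before DMA *to* a device, when dirty lines covering the buffer
 * must reach memory first, while _dma_cache_inv is used around DMA *from*
 * a device, when cached copies of the buffer merely have to be discarded
 * so the CPU re-reads the freshly DMA'd data.
 */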

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
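
/*
 * Context for flush_cache_sigtramp above (background knowledge rather
 * than anything stated in this file): the signal trampoline is a couple
 * of instructions the kernel writes to the user stack as *data*, so the
 * single cache line holding it has to be written back from the D-cache
 * and invalidated from the I-cache before the task executes it, which is
 * exactly the line-sized protected_* sequence used here.
 */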

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
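
	/*
	 * Worked example for the Config1 decode in the default case above
	 * (illustrative numbers only, not taken from any particular part):
	 * an I-cache described by IL=4, IS=2, IA=3 decodes to lines of
	 * 2 << 4 = 32 bytes, 64 << 2 = 256 sets and 1 + 3 = 4 ways, i.e.
	 * 256 * 4 * 32 = 32 kB, with waybit = __ffs(32K / 4) = 13.
	 */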

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so they'd normally suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
		/* Note the deliberate fall-through: unless this config7 bit
		   is set, the default alias check below applies here too. */
		if (!(read_c0_config7() & (1 << 16)))
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
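
/*
 * How the sizing trick above works, as far as I can tell (an explanatory
 * sketch, not text from the original author): a valid tag is faulted in
 * at every power-of-two offset from 'begin', then the tag at 'begin'
 * itself is forced to zero.  Index_Load_Tag_SD is then issued at growing
 * power-of-two offsets; the first offset whose index wraps around onto
 * the line at 'begin' reads back the zero tag, and that offset is the
 * size of the secondary cache.
 */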

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one ...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);
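
	/*
	 * Worked example for the shm_align_mask choice above (made-up but
	 * plausible numbers, not from any particular board): a 32 kB, 4-way
	 * D-cache with 32-byte lines has 256 sets, so sets * linesz = 8 kB
	 * is the way size.  With 4 kB pages the mask becomes 0x1fff, forcing
	 * shared mappings to 8 kB alignment so that virtual aliases of the
	 * same page land on the same cache index.
	 */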

	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_icache_page	= r4k_flush_icache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}