c-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

static unsigned long icache_size, dcache_size, scache_size;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
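
/*
 * These macros identify the R4600 v1.x and v2.x steppings from the
 * PRId register; several errata workarounds below key off them.
 */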
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x2020)

#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
		*(volatile unsigned long *)CKSEG1; \
	if (R4600_V1_HIT_CACHEOP_WAR) \
		__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
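
/*
 * The cache "blast" routines below are selected once at boot, based on
 * the probed cache line size, and then called through function pointers
 * on the flush hot paths.
 */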
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
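
/*
 * On R4600 v1.x an interrupt taken in the middle of an indexed I-cache
 * operation can misbehave, so the whole blast runs with interrupts
 * disabled (see R4600_V1_INDEX_ICACHEOP_WAR).
 */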
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
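
/*
 * TX49 workaround (TX49XX_ICACHE_INDEX_INV_WAR): Index_Invalidate_I
 * must not hit the 1kB chunk of I-cache the code is currently
 * executing from.  The loops are aligned so the first pass runs from
 * an "even" 1kB chunk while invalidating the odd chunks; realignment
 * then places the second pass in an odd chunk while it invalidates
 * the even ones.
 */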
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
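
/*
 * For 32-byte I-cache lines the indexed blast has to pick one of the
 * per-CPU workaround variants above; the plain routine is only safe
 * on unaffected cores.
 */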
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
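
	/*
	 * These processors have a secondary cache that __flush_cache_all()
	 * must flush as well.
	 */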
	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sane ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long page;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long page = fcp_args->page;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(page);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(page);
		}
		if (exec)
			r4k_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	page = INDEX_BASE + (page & (dcache_size - 1));
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(page);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(page);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(page);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long __user start;
	unsigned long __user end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned long addr, aend;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}

		if (!cpu_icache_snoops_remote_store) {
			if (end - start > scache_size) {
				r4k_blast_scache();
			} else {
				addr = start & ~(sc_lsize - 1);
				aend = (end - 1) & ~(sc_lsize - 1);

				while (1) {
					/* Hit_Writeback_Inv_SD */
					protected_writeback_scache_line(addr);
					if (addr == aend)
						break;
					addr += sc_lsize;
				}
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);

		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}

static void r4k_flush_icache_range(unsigned long __user start,
	unsigned long __user end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got the
	 * choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating the
	 * secondary cache will result in any entries in the primary caches
	 * also getting invalidated which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
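
/*
 * On DMA-noncoherent platforms the CPU caches must be maintained by
 * hand around DMA: write back dirty lines before a device reads the
 * buffer, and invalidate stale lines before the CPU reads data a
 * device has written.
 */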
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
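
/*
 * Physically tagged I-caches don't need this; only a virtually tagged
 * I-cache has to be invalidated wholesale here.
 */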
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31.  The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set\tmips0\n\t"
			".set\treorder\n\t"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();
	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU.
		 * Let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so they'd normally suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_SB1:
		break;
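	/*
	 * Note the unusual construct below: on the 24K, config7 bit 16
	 * set apparently means the core handles D-cache aliasing in
	 * hardware; when it is clear, execution falls into the default
	 * alias check, which all other CPU types reach via the default
	 * label.
	 */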
	case CPU_24K:
		if (!(read_c0_config7() & (1 << 16)))
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;	/* does not matter */

	return 1;
}
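
/*
 * setup_scache() calls probe_scache() through its uncached KSEG1 alias
 * so the sizing loop itself doesn't pollute the caches it is measuring.
 */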
typedef int (*probe_func_t)(unsigned long);
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	probe_func_t probe_scache_kseg1;
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
		sc_present = probe_scache_kseg1(config);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32 ||
	     c->isa_level == MIPS_CPU_ISA_M64) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, while others, like Toshiba, had
	 * the silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}
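
/*
 * Boot-time entry point for the R4000-class caches: probe the primary
 * and secondary caches, hook up blast routines sized for what was
 * found, and install the r4k implementations of the generic
 * cache-flush hooks.
 */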
void __init ld_mmu_r4xx0(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80);
	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
	                       c->dcache.sets * c->dcache.linesz - 1,
	                       PAGE_SIZE - 1);

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;
	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	__flush_cache_all();
	coherency_setup();

	build_clear_page();
	build_copy_page();
}