c-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}
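
/*
 * Index-type cache operations only act on the caches of the core that
 * executes them.  On CMP systems another core may hold a given line, so
 * indexed cacheops are presumably not a safe way to flush it; the flag
 * below is therefore cleared there and hit-type/full flushes are used
 * instead.
 */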
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
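
/*
 * Hit-type cacheops are fragile on early R4600 silicon.  As a rough
 * summary of the workaround below: 2.x parts get an uncached load from
 * CKSEG1 issued first, 1.x parts get a few nops inserted instead.  See
 * R4600_V1_HIT_CACHEOP_WAR / R4600_V2_HIT_CACHEOP_WAR in <asm/war.h>
 * for the authoritative erratum description.
 */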
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
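
/*
 * TX49 workaround: the routines below align themselves to a 1 KB boundary
 * (the span covered by one cache32_unroll32 pass) and then invalidate the
 * *other* 1 KB chunks first, so they never invalidate the I-cache lines
 * they are currently executing from.  This only summarises the apparent
 * intent of the even/odd chunk dance; see TX49XX_ICACHE_INDEX_INV_WAR for
 * the underlying erratum.
 */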
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}
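
/*
 * Report whether this mm has ever been given a live ASID: if it never
 * has, none of its pages can be in any CPU's caches, so callers may skip
 * the flush entirely.
 */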
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
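
/*
 * vmap/vunmap flushes: a vmalloc mapping may alias lines cached through
 * the direct mapping, so (rather than chasing individual aliases) the
 * whole D-cache is simply blasted here.
 */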
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}
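
/*
 * When called from atomic context we cannot go through
 * r4k_on_each_cpu()/smp_call_function(), so fall back to flushing the
 * local CPU only in that case.
 */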
static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
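	/*
	 * Make sure the pipeline itself no longer holds stale
	 * instructions: instruction_hazard() emits the required hazard
	 * barrier after the I-cache flush.
	 */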
	instruction_hazard();
}
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_inv_scache_range(addr, addr + size);
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
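	/*
	 * MIPS4K_ICACHE_REFILL_WAR: work around an I-cache refill erratum
	 * on MIPS 4K-style cores by issuing an extra Hit_Invalidate_I on a
	 * nearby label, followed by a few nops.  See <asm/war.h> for the
	 * actual erratum description.
	 */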
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}
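
/*
 * With a virtually tagged I-cache, lines are keyed on virtual addresses,
 * so the only reliable "flush everything" operation is to blast the whole
 * I-cache; physically tagged I-caches need no action here.
 */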
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed so they would normally suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using indexed cache ops,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
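	/*
	 * The zero tag planted at 'begin' will be read back as soon as a
	 * probe address maps to the same S-cache index again, i.e. at the
	 * first power-of-two offset equal to the cache size.
	 */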
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}

	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata say nothing about this bit, so we set it just
	 * in case for those revisions that require it to be set according
	 * to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()						\
	__asm__ __volatile__(					\
		".set noreorder\n\t"				\
		"nop; nop; nop; nop; nop; nop;\n\t"		\
		".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
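
/*
 * "cca=" on the kernel command line overrides the cache coherency
 * attribute used for cached pages; when it is absent or out of range,
 * coherency_setup() below falls back to whatever the bootloader left in
 * c0_config.
 */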
static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);

static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
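
/*
 * Booting with "coherentio" declares the platform's DMA to be cache
 * coherent, so the DMA cache maintenance hooks installed in
 * r4k_cache_init() below are simply stubbed out with cache_noop.
 */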
#if defined(CONFIG_DMA_NONCOHERENT)
static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
#endif
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}