c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
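
/*
 * Note: platforms with an external (board-level) cache controller install
 * their own bcache_ops at init time, replacing no_sc_ops above; the
 * bc_wback_inv()/bc_inv() hooks are then invoked from the DMA cache
 * maintenance paths further down in this file.
 */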
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
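
/*
 * Background (per the R4600 errata these WAR flags correspond to): on
 * R4600 V2.x the Hit_* cache ops only work reliably when the internal
 * cache refill buffer is empty, and a load from uncached (CKSEG1) space
 * is the documented way to drain it.  On V1.x a short run of nops is
 * used instead, to separate the cache op from preceding instructions.
 */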
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}
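
/*
 * Dispatch pattern used throughout this file: the cache line size is only
 * known at boot, so each *_setup() routine (run once from r4k_cache_init()
 * at the bottom of this file) binds a function pointer to the line-size
 * specific blast routine, instead of testing the line size on every flush.
 */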
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
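
/*
 * How the even/odd chunk dance in tx49_blast_icache32() above (and its
 * page-indexed variant below) sidesteps the TX49 index-invalidate erratum:
 * cache32_unroll32() covers 0x400 bytes (32 lines of 32 bytes) per call,
 * and the JUMP_TO_ALIGN macros place the loop code on a 0x400 (or 0x800)
 * boundary.  Each pass thus only invalidates icache chunks the code is not
 * currently executing from, so the routine never pulls its own
 * instructions out from under itself.
 */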
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
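
/*
 * The cpu_has_dc_aliases check above (also in r4k_flush_cache_mm() below)
 * is the common theme of these entry points: on a CPU whose D-cache way
 * size does not exceed PAGE_SIZE, remapping a page cannot leave stale
 * duplicate lines behind, so the flush degenerates to a no-op and only
 * aliasing caches pay for it.
 */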
static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	addr = INDEX_BASE + (addr & (dcache_size - 1));
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(addr);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}
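
/*
 * Note on the indexed path above: "addr & (dcache_size - 1)" keeps just
 * the bits that select a cache index, and rebasing the result at
 * INDEX_BASE yields a legal kernel address to issue Index_* cache ops
 * against, so no TLB entry for the victim page is ever needed.
 */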
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
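
/*
 * The "end - start > {d,s,i}cache_size" tests above are a cost cutoff:
 * once a range is larger than the cache itself, blasting every cache
 * index is cheaper (and bounded) compared with issuing a Hit op per
 * line of the range.
 */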
/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  OTOH we at
 * least know the kernel address of the page, so we can flush it
 * selectively.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary caches or
	 * just the secondary cache.  With the subset enforcement on
	 * R4000SC, R4400SC, R10000 and R12000, invalidating the secondary
	 * cache will also invalidate any entries in the primary caches,
	 * which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no
	 * I-cache flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
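
/*
 * Usage note: r4k_dma_cache_wback_inv() serves the writeback direction
 * (dirty lines must reach memory before the device reads the buffer),
 * while r4k_dma_cache_inv() serves the invalidate direction (stale lines
 * must not shadow fresh DMA data); see the _dma_cache_* assignments in
 * r4k_cache_init().  Both finish with the board-cache hook (bc_*) so an
 * external second-level cache is maintained as well.
 */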
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
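
/*
 * What the sequence above guarantees: the signal trampoline is written to
 * the user stack through the D-cache, so the dirty D-cache line (and,
 * where the I-cache doesn't snoop stores, the S-cache line) is written
 * back first and the matching I-cache line invalidated.  The protected_*
 * variants carry exception fixups, so a bad userland pointer is simply
 * ignored here rather than crashing the kernel.
 */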
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
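
/*
 * Reading of the workaround (based on the code above): with TagLo/TagHi
 * zeroed, Index_Store_Tag_I invalidates a line, Fill then refills it, and
 * a second Index_Store_Tag_I invalidates it again.  The 0x1000 offsets
 * step across the RM7000's four icache ways (a 4kB way size is assumed
 * here), so the whole icache ends up in a known-clean state at startup.
 */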
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU;
		 * let's probe the I-cache first ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so they normally would suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_SB1:
		break;
	case CPU_24K:
		if (!(read_c0_config7() & (1 << 16)))
	default:
			if (c->dcache.waysize > PAGE_SIZE)
				c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
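
	/*
	 * Careful with the switch above: it is not broken indentation.
	 * The CPU_24K case deliberately falls into the body under the
	 * default label, so a 24K only skips the aliases check when bit 16
	 * of Config7 is set; every other unlisted CPU type gets
	 * MIPS_CACHE_ALIASES whenever its D-cache way size exceeds
	 * PAGE_SIZE.
	 */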
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}
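
/*
 * For reference, the Config1 decode in probe_pcache()'s default case
 * follows the MIPS32/MIPS64 architecture definition: for each cache,
 * sets = 64 << S, line size = 2 << L (a zero L field meaning no cache),
 * and associativity = A + 1, where S, L and A are the 3-bit fields at the
 * bit positions read above (22/19/16 for the I-cache, 13/10/7 for the
 * D-cache).
 */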
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
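
/*
 * How the sizing above works: the fill loop touches one word at each
 * power-of-two offset so every candidate index holds a valid tag, then a
 * zero (invalid) tag is planted at offset 0 of the S-cache.  The search
 * loop reads tags back at 128kB, 256kB, ... until Index_Load_Tag_SD
 * returns the zero tag, i.e. until the index wraps back onto offset 0;
 * that offset is the size of the direct-mapped S-cache.
 */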
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32R1 ||
	     c->isa_level == MIPS_CPU_ISA_M64R1) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}
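
/*
 * The MIPS_CPU_SUBSET_CACHES flag set here is what cpu_has_subset_pcaches
 * tests in the flush and DMA paths above: on these parts the primary
 * caches hold a subset of the secondary cache, so maintaining the S-cache
 * implicitly maintains the P-caches as well.
 */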
static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others, like Toshiba, had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}
void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);
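
	/*
	 * shm_align_mask implements page colouring for shared mappings:
	 * c->dcache.sets * c->dcache.linesz is the D-cache way size, so
	 * aligning shared mappings to that granularity makes all virtual
	 * aliases of a page land on the same cache index.
	 */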
	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_icache_page	= r4k_flush_icache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}