/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;
/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */
#define cache_set_op(op, addr)                                  \
        __asm__ __volatile__(                                   \
        "       .set    noreorder               \n"             \
        "       .set    mips64                  \n"             \
        "       cache   %0, (0<<13)(%1)         \n"             \
        "       cache   %0, (1<<13)(%1)         \n"             \
        "       cache   %0, (2<<13)(%1)         \n"             \
        "       cache   %0, (3<<13)(%1)         \n"             \
        "       .set    mips0                   \n"             \
        "       .set    reorder"                                \
        :                                                       \
        : "i" (op), "r" (addr))
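
/*
 * A note on the four ops above: assuming the SB1's primary caches are
 * 4-way set associative with 8KB per way (256 sets of 32-byte lines),
 * the way-select bits of an index-op address sit just above the set
 * index, at bits 14:13; the four ops at offsets 0<<13 through 3<<13
 * therefore hit the same index in all four ways in one shot.
 */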
#define sync()                                                  \
        __asm__ __volatile__(                                   \
        "       .set    mips64                  \n"             \
        "       sync                            \n"             \
        "       .set    mips0")

#define mispredict()                                            \
        __asm__ __volatile__(                                   \
        "       bnezl   $0, 1f                  \n" /* Force mispredict */ \
        "1:                                     \n")
/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
        unsigned long addr = 0;

        while (addr < dcache_line_size * dcache_sets) {
                cache_set_op(Index_Writeback_Inv_D, addr);
                addr += dcache_line_size;
        }
}
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to flush both aliases
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
        unsigned long end)
{
        unsigned long index;

        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                index = start & dcache_index_mask;
                cache_set_op(Index_Writeback_Inv_D, index);
                cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
                start += dcache_line_size;
        }
        sync();
}
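
/*
 * Worked example with illustrative numbers: with 32-byte lines and 256
 * sets per way, dcache_index_mask is (256 - 1) * 32 = 0x1fe0.  A
 * virtual address of 0x2b64 then indexes line 0xb60; since the
 * physical page may differ from the virtual one in bit 12, the alias
 * at 0xb60 ^ 0x1000 = 0x1b60 is written back and invalidated as well.
 */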
/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
        unsigned long end)
{
        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
                start += dcache_line_size;
        }
        sync();
}
/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
        unsigned long addr = 0;

        while (addr < icache_line_size * icache_sets) {
                cache_set_op(Index_Invalidate_I, addr);
                addr += icache_line_size;
        }
}

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        start &= ~(icache_line_size - 1);
        end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Invalidate_I, start & icache_index_mask);
                start += icache_line_size;
        }
        mispredict();
        sync();
}
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * Bumping the ASID is probably cheaper than the flush ...
         */
        if (vma->vm_mm == current->active_mm) {
                if (cpu_context(cpu, vma->vm_mm) != 0)
                        drop_mmu_context(vma->vm_mm, cpu);
        } else
                __sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
        struct flush_cache_page_args *args = info;

        local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        addr &= PAGE_MASK;
        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;
        on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
        unsigned long pfn)
        __attribute__((alias("local_sb1_flush_cache_page")));
#endif
/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
        __sb1_writeback_inv_dcache_all();
        __sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
        __attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
        on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
        __attribute__((alias("local_sb1___flush_cache_all")));
#endif
/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        /* Just wb-inv the whole dcache if the range is big enough */
        if ((end - start) > dcache_range_cutoff)
                __sb1_writeback_inv_dcache_all();
        else
                __sb1_writeback_inv_dcache_range(start, end);

        /* Just flush the whole icache if the range is big enough */
        if ((end - start) > icache_range_cutoff)
                __sb1_flush_icache_all();
        else
                __sb1_flush_icache_range(start, end);
}
#ifdef CONFIG_SMP
struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
        struct flush_icache_range_args *args = info;

        local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;
        on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
        __attribute__((alias("local_sb1_flush_icache_range")));
#endif
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        unsigned long start;
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        /* Need to writeback any dirty data for that page, we have the PA */
        start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
        __sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);

        /*
         * If there's a context, bump the ASID (cheaper than a flush,
         * since we don't know VAs!)
         */
        if (vma->vm_mm == current->active_mm) {
                if (cpu_context(cpu, vma->vm_mm) != 0)
                        drop_mmu_context(vma->vm_mm, cpu);
        } else
                __sb1_flush_icache_range(start, start + PAGE_SIZE);
}
#ifdef CONFIG_SMP
struct flush_icache_page_args {
        struct vm_area_struct *vma;
        struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
        struct flush_icache_page_args *args = info;

        local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        struct flush_icache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        args.vma = vma;
        args.page = page;
        on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
        __attribute__((alias("local_sb1_flush_icache_page")));
#endif
/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
        cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
        cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
        cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
        mispredict();
}
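
/*
 * A note on local_sb1_flush_cache_sigtramp: because the trampoline
 * fits in a single line, one dcache index op per possible bit-12
 * alias writes the data back, and a single icache index op
 * invalidates the stale instructions; the icache is virtually
 * indexed, so it has no bit-12 alias to cover.
 */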
#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
        unsigned long iaddr = (unsigned long) info;
        local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
        on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
        __attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif

/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}
/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
        if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1<<(config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
        if (config_field == 0) {
                return 0;
        } else if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1<<(config_field + 1));
}

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache Associativity
 */
static char *way_string[] = {
        "direct mapped", "2-way", "3-way", "4-way",
        "5-way", "6-way", "7-way", "8-way",
};

static __init void probe_cache_sizes(void)
{
        u32 config1;

        config1 = read_c0_config1();
        icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
        dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
        icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
        dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
        icache_assoc = ((config1 >> 16) & 0x7) + 1;
        dcache_assoc = ((config1 >> 7) & 0x7) + 1;
        icache_size = icache_line_size * icache_sets * icache_assoc;
        dcache_size = dcache_line_size * dcache_sets * dcache_assoc;

        /* Need to remove non-index bits for index ops */
        icache_index_mask = (icache_sets - 1) * icache_line_size;
        dcache_index_mask = (dcache_sets - 1) * dcache_line_size;

        /*
         * These are for choosing range (index ops) versus all.
         * icache flushes all ways for each set, so drop icache_assoc.
         * dcache flushes all ways and each setting of bit 12 for each
         * index, so drop dcache_assoc and halve the dcache_sets.
         */
        icache_range_cutoff = icache_sets * icache_line_size;
        dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

        printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
               icache_size >> 10, way_string[icache_assoc - 1],
               icache_line_size);
        printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[dcache_assoc - 1],
               dcache_line_size);
}
/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void sb1_cache_init(void)
{
        extern char except_vec2_sb1;
        extern char handle_vec2_sb1;

        /* Special cache error handler for SB1 */
        set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

        probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
        sb1_dma_init();
#endif

        /*
         * None of these are needed for the SB1 - the Dcache is
         * physically indexed and tagged, so no virtual aliasing can
         * occur
         */
        flush_cache_range = (void *) sb1_nop;
        flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
        flush_cache_all = sb1_nop;

        /* These routines are for Icache coherence with the Dcache */
        flush_icache_range = sb1_flush_icache_range;
        __flush_icache_page = sb1_flush_icache_page;
        flush_icache_all = __sb1_flush_icache_all; /* local only */

        /* This implies an Icache flush too, so can't be nop'ed */
        flush_cache_page = sb1_flush_cache_page;

        flush_cache_sigtramp = sb1_flush_cache_sigtramp;
        local_flush_data_cache_page = (void *) sb1_nop;
        flush_data_cache_page = (void *) sb1_nop;

        /* Full flush */
        __flush_cache_all = sb1___flush_cache_all;

        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * This is the only way to force the update of K0 to complete
         * before subsequent instruction fetch.
         */
        __asm__ __volatile__(
                ".set   push                    \n"
                "       .set    noat            \n"
                "       .set    noreorder       \n"
                "       .set    mips3           \n"
                "       " STR(PTR_LA) " $1, 1f  \n"
                "       " STR(MTC0) " $1, $14   \n"
                "       eret                    \n"
                "1:     .set    pop"
                :
                :
                : "memory");

        flush_cache_all();
}