
/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing shd not be based on assumption of 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Convert them into iteration based loops, as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In I-cache flush routine we used to chk for aliasing for every line INV.
 *   Instead now we setup routines per cache geometry and invoke them
 *   via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug
 *    in ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc_to_phy.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. For e.g. a module
 *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
        int n = 0;
        unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)                                           \
{                                                                       \
        if (!(p)->ver)                                                  \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
        else                                                            \
                n += scnprintf(buf + n, len - n,                        \
                        str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
                        TO_KB((p)->sz), (p)->assoc, (p)->line_len,      \
                        enb ? "" : "DISABLED (kernel-build)");          \
}

        PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
        PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");

        return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_cache_bcr(void)
{
        struct bcr_cache ibcr, dbcr;
        struct cpuinfo_arc_cache *p_ic, *p_dc;
        unsigned int cpu = smp_processor_id();

        p_ic = &cpuinfo_arc700[cpu].icache;
        READ_BCR(ARC_REG_IC_BCR, ibcr);

        if (ibcr.config == 0x3)
                p_ic->assoc = 2;

        p_ic->line_len = 8 << ibcr.line_len;
        p_ic->sz = 0x200 << ibcr.sz;
        p_ic->ver = ibcr.ver;

        p_dc = &cpuinfo_arc700[cpu].dcache;
        READ_BCR(ARC_REG_DC_BCR, dbcr);

        if (dbcr.config == 0x2)
                p_dc->assoc = 4;

        p_dc->line_len = 16 << dbcr.line_len;
        p_dc->sz = 0x200 << dbcr.sz;
        p_dc->ver = dbcr.ver;
}

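/*
 * Worked example of the BCR decode arithmetic above (hypothetical field
 * values, purely for illustration - not read from real hardware):
 * with ibcr.line_len = 2 and ibcr.sz = 5, the I-cache decodes to
 * (8 << 2) = 32 byte lines and (0x200 << 5) = 0x4000 = 16K total;
 * a dbcr.line_len of 1 would likewise decode to (16 << 1) = 32 byte
 * D-cache lines.
 */
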
/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (different flush
 *    routines); aliasing D-cache configurations are not supported YET
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void __cpuinit arc_cache_init(void)
{
        unsigned int temp;
        unsigned int cpu = smp_processor_id();
        struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
        struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
        int way_pg_ratio = way_pg_ratio;
        int dcache_does_alias;
        char str[256];

        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

        if (!ic->ver)
                goto chk_dc;

#ifdef CONFIG_ARC_HAS_ICACHE
        /* 1. Confirm some of I-cache params which Linux assumes */
        if ((ic->assoc != ARC_ICACHE_WAYS) ||
            (ic->line_len != ARC_ICACHE_LINE_LEN)) {
                panic("Cache H/W doesn't match kernel Config");
        }
#if (CONFIG_ARC_MMU_VER > 2)
        if (ic->ver != 3) {
                if (running_on_hw)
                        panic("Cache ver doesn't match MMU ver\n");

                /* For ISS - suggest the toggles to use */
                pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
        }
#endif
#endif

        /* Enable/disable I-Cache */
        temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
        temp &= ~IC_CTRL_CACHE_DISABLE;
#else
        temp |= IC_CTRL_CACHE_DISABLE;
#endif

        write_aux_reg(ARC_REG_IC_CTRL, temp);

chk_dc:
        if (!dc->ver)
                return;

#ifdef CONFIG_ARC_HAS_DCACHE
        if ((dc->assoc != ARC_DCACHE_WAYS) ||
            (dc->line_len != ARC_DCACHE_LINE_LEN)) {
                panic("Cache H/W doesn't match kernel Config");
        }

        dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;

        /* check for D-Cache aliasing */
        if (dcache_does_alias && !cache_is_vipt_aliasing())
                panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
        else if (!dcache_does_alias && cache_is_vipt_aliasing())
                panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
#endif

        /* Set the default Invalidate Mode to "simply discard dirty lines"
         * as this is more frequent than flush before invalidate.
         * Of course we toggle this default behaviour when desired
         */
        temp = read_aux_reg(ARC_REG_DC_CTRL);
        temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
        /* Enable D-Cache: Clear Bit 0 */
        write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
        /* Flush D cache */
        write_aux_reg(ARC_REG_DC_FLSH, 0x1);
        /* Disable D cache */
        write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif

        return;
}

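/*
 * Worked example of the aliasing check above (illustrative numbers):
 * with ARC_DCACHE_WAYS = 4 and an 8K PAGE_SIZE, a 32K D-cache has an 8K way
 * (32K / 4), which does not exceed a page, so it cannot alias; a 64K D-cache
 * has a 16K way spanning 2 pages, so it does alias and needs
 * CONFIG_ARC_CACHE_VIPT_ALIASING.
 */
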
#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3

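/*
 * Note: OP_FLUSH_N_INV (0x3) is simply OP_INV | OP_FLUSH, which is why the
 * helpers below can test individual bits with (cacheop & OP_INV) and
 * (cacheop & OP_FLUSH) and have flush-n-inv take both paths.
 */
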
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
        while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
                ;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
        unsigned long flags, tmp = tmp;
        int aux;

        local_irq_save(flags);

        if (cacheop == OP_FLUSH_N_INV) {
                /* Dcache provides 2 cmd: FLUSH or INV
                 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * Default INV sub-mode is DISCARD, which needs to be toggled
                 */
                tmp = read_aux_reg(ARC_REG_DC_CTRL);
                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
        }

        if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
                aux = ARC_REG_DC_IVDC;
        else
                aux = ARC_REG_DC_FLSH;

        write_aux_reg(aux, 0x1);

        if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
                wait_for_flush();

        /* Switch back the DISCARD ONLY Invalidate mode */
        if (cacheop == OP_FLUSH_N_INV)
                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

        local_irq_restore(flags);
}

/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
                                  unsigned long sz, const int aux_reg)
{
        int num_lines;

        /* Ensure we properly floor/ceil the non-line aligned/sized requests
         * and have @paddr - aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized since:
         *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
                sz += paddr & ~DCACHE_LINE_MASK;
                paddr &= DCACHE_LINE_MASK;
                vaddr &= DCACHE_LINE_MASK;
        }

        num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
                /*
                 * Just as for I$, in MMU v3, D$ ops also require
                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
                 */
                write_aux_reg(ARC_REG_DC_PTAG, paddr);

                write_aux_reg(aux_reg, vaddr);
                vaddr += ARC_DCACHE_LINE_LEN;
#else
                /* paddr contains stuffed vaddrs bits */
                write_aux_reg(aux_reg, paddr);
#endif
                paddr += ARC_DCACHE_LINE_LEN;
        }
}

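/*
 * Worked example of the floor/ceil fixup in __dc_line_loop() (illustrative
 * values, assuming a 32-byte D-cache line): for paddr = 0x80001234 and
 * sz = 0x40, the line offset is paddr & ~DCACHE_LINE_MASK = 0x14, so sz
 * grows to 0x54 while paddr is floored to 0x80001220; num_lines then
 * becomes DIV_ROUND_UP(0x54, 32) = 3, covering the full requested range.
 */
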
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz, const int cacheop)
{
        unsigned long flags, tmp = tmp;
        int aux;

        local_irq_save(flags);

        if (cacheop == OP_FLUSH_N_INV) {
                /*
                 * Dcache provides 2 cmd: FLUSH or INV
                 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * Default INV sub-mode is DISCARD, which needs to be toggled
                 */
                tmp = read_aux_reg(ARC_REG_DC_CTRL);
                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
        }

        if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
                aux = ARC_REG_DC_IVDL;
        else
                aux = ARC_REG_DC_FLDL;

        __dc_line_loop(paddr, vaddr, sz, aux);

        if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
                wait_for_flush();

        /* Switch back the DISCARD ONLY Invalidate mode */
        if (cacheop == OP_FLUSH_N_INV)
                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

        local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and moreover these 5
 * bits of vaddr could easily be "stuffed" in the paddr as bits [4:0] since
 * the orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this
 * "clumsy" interface for additional info was that no new reg was needed in
 * the CDU programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

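/*
 * Worked example of the above (illustrative geometry, assuming a 2-way
 * I-cache and 8K pages): a 32K cache has a 16K way = 2 pages, so one vaddr
 * bit (13) picks the alias; a 64K cache has a 32K way = 4 pages, needing
 * bits 14:13. The MMU v1/v2 "stuffing" is then just
 * paddr |= (vaddr >> PAGE_SHIFT) & 0x1F, e.g. vaddr = 0x2000A000 with
 * PAGE_SHIFT = 13 yields (vaddr >> 13) & 0x1F = 0x5, i.e. vaddr bits 17:13
 * riding in the otherwise-ignored low 5 bits of paddr.
 */
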
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
{
        unsigned long flags;
        int num_lines;

        /*
         * Ensure we properly floor/ceil the non-line aligned/sized requests:
         * However page sized flushes can be compile time optimised.
         *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
                sz += paddr & ~ICACHE_LINE_MASK;
                paddr &= ICACHE_LINE_MASK;
                vaddr &= ICACHE_LINE_MASK;
        }

        num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

        local_irq_save(flags);
        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
                /* tag comes from phy addr */
                write_aux_reg(ARC_REG_IC_PTAG, paddr);

                /* index bits come from vaddr */
                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                vaddr += ARC_ICACHE_LINE_LEN;
#else
                /* paddr contains stuffed vaddrs bits */
                write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
                paddr += ARC_ICACHE_LINE_LEN;
        }
        local_irq_restore(flags);
}

#else

#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        if (!cache_is_vipt_aliasing()) {
                set_bit(PG_arch_1, &page->flags);
                return;
        }

        /* don't handle anon pages here */
        mapping = page_mapping(page);
        if (!mapping)
                return;

        /*
         * pagecache page, file not yet mapped to userspace
         * Make a note that K-mapping is dirty
         */
        if (!mapping_mapped(mapping)) {
                set_bit(PG_arch_1, &page->flags);
        } else if (page_mapped(page)) {

                /* kernel reading from page with U-mapping */
                void *paddr = page_address(page);
                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

                if (addr_not_cache_congruent(paddr, vaddr))
                        __flush_dcache_page(paddr, vaddr);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

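/*
 * Typical usage of the three DMA helpers above (general cache-maintenance
 * convention, not specific to any one driver): dma_cache_wback() before a
 * device reads a CPU-written buffer, dma_cache_inv() before the CPU reads a
 * device-written buffer, and dma_cache_wback_inv() when the same buffer is
 * used in both directions.
 */
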
/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
        unsigned int tot_sz, off, sz;
        unsigned long phy, pfn;

        /* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

        /* This is not the right API for user virtual address */
        if (kstart < TASK_SIZE) {
                BUG_ON("Flush icache range for user virtual addr space");
                return;
        }

        /* Shortcut for bigger flush ranges.
         * Here we don't care if this was kernel virtual or phy addr
         */
        tot_sz = kend - kstart;
        if (tot_sz > PAGE_SIZE) {
                flush_cache_all();
                return;
        }

        /* Case: Kernel Phy addr (0x8000_0000 onwards) */
        if (likely(kstart > PAGE_OFFSET)) {
                /*
                 * The 2nd arg despite being paddr will be used to index icache
                 * This is OK since no alternate virtual mappings will exist
                 * given the callers for this case: kprobe/kgdb in built-in
                 * kernel code only.
                 */
                __sync_icache_dcache(kstart, kstart, kend - kstart);
                return;
        }

        /*
         * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
         * (1) ARC Cache Maintenance ops only take Phy addr, hence special
         *     handling of kernel vaddr.
         *
         * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
         *     it still needs to handle a 2 page scenario, where the range
         *     straddles across 2 virtual pages and hence need for loop
         */
        while (tot_sz > 0) {
                off = kstart % PAGE_SIZE;
                pfn = vmalloc_to_pfn((void *)kstart);
                phy = (pfn << PAGE_SHIFT) + off;
                sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
                __sync_icache_dcache(phy, kstart, sz);
                kstart += sz;
                tot_sz -= sz;
        }
}

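/*
 * Worked example of the vmalloc loop above (illustrative addresses, assuming
 * an 8K PAGE_SIZE): for kstart = 0x70001f00 and tot_sz = 0x300, the first
 * pass uses off = 0x1f00 and sz = min(0x300, 0x2000 - 0x1f00) = 0x100, then
 * the second pass starts at 0x70002000 (a new page, hence a fresh
 * vmalloc_to_pfn() lookup) and covers the remaining 0x200 bytes.
 */
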
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user or kernel vaddr (vmalloc)
 * However in one instance, flush_icache_range() by kprobe (for a breakpt in
 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 * use a paddr to index the cache (despite VIPT). This is fine since a
 * built-in kernel page will not have any virtual mappings (not even kernel).
 * kprobe on loadable module is different as it will have kvaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
        unsigned long flags;

        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
        __dc_line_op(paddr, vaddr, len, OP_FLUSH);
        local_irq_restore(flags);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
        __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

void flush_icache_all(void)
{
        unsigned long flags;

        local_irq_save(flags);

        write_aux_reg(ARC_REG_IC_IVIC, 1);

        /* lr will not complete till the icache inv operation is over */
        read_aux_reg(ARC_REG_IC_CTRL);

        local_irq_restore(flags);
}

noinline void flush_cache_all(void)
{
        unsigned long flags;

        local_irq_save(flags);

        flush_icache_all();
        __dc_entire_op(OP_FLUSH_N_INV);

        local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
                      unsigned long pfn)
{
        unsigned int paddr = pfn << PAGE_SHIFT;

        __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        flush_cache_all();
}

void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long u_vaddr, struct vm_area_struct *vma)
{
        void *kfrom = page_address(from);
        void *kto = page_address(to);
        int clean_src_k_mappings = 0;

        /*
         * If SRC page was already mapped in userspace AND its U-mapping is
         * not congruent with K-mapping, sync former to physical page so that
         * K-mapping in memcpy below, sees the right data
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
         */
        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
                __flush_dcache_page(kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }

        copy_page(kto, kfrom);

        /*
         * Mark DST page K-mapping as dirty for a later finalization by
         * update_mmu_cache(). Although the finalization could have been done
         * here as well (given that both vaddr/paddr are available).
         * But update_mmu_cache() already has code to do that for other
         * non copied user pages (e.g. read faults which wire in pagecache page
         * directly).
         */
        set_bit(PG_arch_1, &to->flags);

        /*
         * if SRC was already usermapped and non-congruent to kernel mapping
         * sync the kernel mapping back to physical page
         */
        if (clean_src_k_mappings) {
                __flush_dcache_page(kfrom, kfrom);
        } else {
                set_bit(PG_arch_1, &from->flags);
        }
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
        clear_page(to);
        set_bit(PG_arch_1, &page->flags);
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                     unsigned long u_vaddr)
{
        /* TBD: do we really need to clear the kernel mapping */
        __flush_dcache_page(page_address(page), u_vaddr);
        __flush_dcache_page(page_address(page), page_address(page));
}

#endif

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
        /* TBD: optimize this */
        flush_cache_all();
        return 0;
}
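
/*
 * Hypothetical userspace usage of the syscall above (illustrative only; the
 * syscall number macro and arguments are assumptions, not taken from this
 * file): a JIT would typically do something like
 *     syscall(__NR_cacheflush, (uint32_t)code_buf, code_len, 0);
 * right after emitting machine code into code_buf and before jumping to it.
 */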