cache.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
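
/* Note: the purge_tlb_start()/purge_tlb_end() helpers used throughout
 * this file (see asm/tlbflush.h) acquire and release this lock with
 * interrupts disabled; that is what serializes the PxTLB broadcasts
 * described above.
 */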

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT" : "WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D" : ""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB" : ""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
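	/* Worked example with hypothetical PDC values: cc_line = 4,
	 * cc_block = 1, cc_shift = 2 gives a stride of
	 * 4 << (3 + 1 + 2) = 256 bytes, matching Jim Hull's original
	 * formula: (1 << (1-1)) * (4 << (4+2)) = 1 * 256 = 256.
	 */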
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits = 0;	/* stays 0 if PDC_BAD_OPTION below */

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */
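	/* Congruent here means the user addresses agree modulo SHMLBA,
	 * so all aliases of the page land on the same cache index;
	 * hence the (addr & (SHMLBA - 1)) comparison below.
	 */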
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */
		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr,
					mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;
	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
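	/* Worked example with made-up numbers: if the full flush took
	 * 100000 cycles and flushing a 2 MB (2097152-byte) kernel range
	 * took 400000 cycles, the break-even point is
	 * 2097152 * 100000 / 400000 = 524288 bytes (0.5 MB), already a
	 * multiple of L1_CACHE_BYTES, so the rounding above is a no-op.
	 */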
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
	clear_page_asm(vto);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping. No coherency is needed (all in
	   kmap/kunmap) on machines that don't support non-equivalent
	   aliasing. However, the `from' page needs to be flushed
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context. */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region. The page doesn't need to
	   be flushed but the kernel mapping needs to be purged. */

	vto = kmap_atomic(page);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB. (Note
	   that the caches are not required to be flushed at this
	   time.) Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB." */
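	/* The purge + TLB flush below implement that requirement for the
	 * kernel alias: it is flushed from the cache and its translation
	 * purged before the TMPALIAS write mapping is used by
	 * clear_user_page_asm().
	 */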
	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region. This has the advantage
	   that the `from' page doesn't need to be flushed. However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code. */

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */