/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif
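
/*
 * SMP wrappers: run the per-CPU cache flush routines on every online
 * CPU.  On UP kernels the *_local variants are called directly instead.
 */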
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
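
/*
 * Called when a page table entry is (re)installed.  flush_dcache_page()
 * defers the flush for pages with no user mapping yet by setting
 * PG_dcache_dirty; pay that debt here, once a mapping appears.
 */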
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}
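
/* Report cache and TLB geometry (as read from PDC firmware) for
 * /proc/cpuinfo. */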
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
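	/* Worked example with illustrative (not real-hardware) values:
	 * cc_line = 2, cc_block = 1, cc_shift = 2 gives
	 * CAFL_STRIDE = 2 << (3 + 1 + 2) = 128 bytes between flushes,
	 * matching Jim Hull's form: (1 << (1-1)) * (2 << (4+2)) = 128.
	 */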
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}
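
/*
 * Space-register hashing mixes space-ID bits into the cache/TLB index
 * on the CPUs that implement it.  Linux relies on plain virtual
 * indexing for its congruence (SHMLBA) rules, so hashing is turned off
 * here and we verify via PDC that it really is off.
 */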
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}
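
/* Flush one user page via its physical address alias.  The icache is
 * only flushed for executable mappings; preemption is disabled so both
 * assembly helpers run on the same CPU. */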
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
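
/*
 * Calibrate parisc_cache_flush_threshold: time a whole-cache flush and
 * a range flush over the kernel image with the interval timer (cr16),
 * then scale to the range size at which flushing everything becomes
 * cheaper than flushing line by line.
 */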
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;
	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
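
/* Flush a page's kernel mapping and purge its kernel TLB entry, so the
 * CPU cannot speculatively pull stale lines back in through the kernel
 * translation (see the coherence comment in flush_dcache_page). */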
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
	clear_page_asm(vto);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kmap/kunmap) on machines that don't support non-equivalent
	   aliasing.  However, the `from' page needs to be flushed
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
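
/* On machines that require coherency (parisc_requires_coherency(),
 * e.g. PA8800/PA8900), flush the kernel alias of a page when it is
 * kunmap'ed so user mappings never see stale data. */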
#ifdef CONFIG_PA8X00
void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context.  */

	/* Disable preemption while we play with %sr1.  */
	preempt_disable();
	mtsp(mm->context, 1);
	purge_tlb_start(flags);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
	preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);
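
/* Purge a user address range from the TLB.  %sr1 is loaded with the
 * target space ID so pdtlb/pitlb hit the right context; past 512 pages
 * a full flush is assumed cheaper than purging page by page. */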
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}
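
/* Walk the page tables by hand (pgd -> pud -> pmd -> pte), returning
 * NULL if any intermediate level is empty.  Used when flushing an
 * address space that is not the current one, where the user virtual
 * addresses cannot be touched directly. */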
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}
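
/* Flush an entire address space.  If the mm is small enough, flush it
 * VMA by VMA: directly when it is the current address space (%sr3), or
 * through each page's physical alias when it is not. */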
void flush_cache_mm(struct mm_struct *mm)
{
	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
		struct vm_area_struct *vma;

		if (mm->context == mfsp(3)) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				flush_user_dcache_range_asm(vma->vm_start,
							    vma->vm_end);
				if (vma->vm_flags & VM_EXEC)
					flush_user_icache_range_asm(
						vma->vm_start, vma->vm_end);
			}
		} else {
			pgd_t *pgd = mm->pgd;

			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				unsigned long addr;

				for (addr = vma->vm_start; addr < vma->vm_end;
				     addr += PAGE_SIZE) {
					pte_t *ptep = get_ptep(pgd, addr);
					if (ptep != NULL) {
						pte_t pte = *ptep;
						__flush_cache_page(vma, addr,
						  page_to_phys(pte_page(pte)));
					}
				}
			}
		}
		return;
	}

#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}
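
/* The range flushes below fall back to a whole-cache flush once the
 * range exceeds the calibrated threshold, since per-line flushing then
 * costs more than flushing everything. */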
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	BUG_ON(!vma->vm_mm->context);

	if ((end - start) < parisc_cache_flush_threshold) {
		if (vma->vm_mm->context == mfsp(3)) {
			flush_user_dcache_range_asm(start, end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(start, end);
		} else {
			unsigned long addr;
			pgd_t *pgd = vma->vm_mm->pgd;

			for (addr = start & PAGE_MASK; addr < end;
			     addr += PAGE_SIZE) {
				pte_t *ptep = get_ptep(pgd, addr);
				if (ptep != NULL) {
					pte_t pte = *ptep;
					flush_cache_page(vma,
					   addr, pte_pfn(pte));
				}
			}
		}
	} else {
#ifdef CONFIG_SMP
		flush_cache_all();
#else
		flush_cache_all_local();
#endif
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region.  The page doesn't need to
	   be flushed but the kernel mapping needs to be purged.  */

	vto = kmap_atomic(page, KM_USER0);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB.  (Note
	   that the caches are not required to be flushed at this
	   time.)  Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB."  */

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region.  This has the advantage
	   that the `from' page doesn't need to be flushed.  However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code.  */

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */