/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
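
/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) down to the
 * pte entry that backs a fixmap virtual address.
 */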
#define kmap_get_fixmap_pte(vaddr)                                     \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;
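
/* Called once at boot to cache the pte backing the first coherent fixmap slot. */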
void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
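
/*
 * Map @page at the fixmap slot whose virtual address has the same
 * dcache colour as @addr, so that kernel-side accesses hit the same
 * cache lines as the user mapping.  Disables preemption until the
 * matching kunmap_coherent().
 */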
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	/* pick the fixmap slot with the same dcache colour as addr */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* drop any stale TLB entry for this slot before reloading it */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	/* nothing to unmap; the fixmap slot is simply reused next time */
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	clear_page(to);

	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
		__flush_wback_region(to, PAGE_SIZE);
}
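
/*
 * copy_to_user_page
 * @vma: vma of the user mapping
 * @vaddr: U0 address
 * Writes through a coherent kmap while the page is mapped with a clean
 * dcache; otherwise copies via the kernel alias and marks the dcache
 * dirty so the flush happens later.
 */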
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
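
/*
 * copy_from_user_page
 * @vma: vma of the user mapping
 * @vaddr: U0 address
 * Read-side counterpart of copy_to_user_page(): reads through a
 * coherent kmap when the page is mapped and its dcache is clean.
 */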
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		set_bit(PG_dcache_dirty, &page->flags);
	}
}
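
/*
 * copy_user_highpage
 * @to: destination page
 * @from: source page
 * @vaddr: U0 address the new page will be mapped at
 * Copies a whole user page (e.g. on copy-on-write), reading @from
 * through a coherent kmap when it is mapped, and writing back the
 * destination if it aliases with @vaddr.
 */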
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is visible to other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);