/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

/* Walk the kernel page tables down to the PTE backing a fixmap address. */
#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;
void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
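/*
 * Note (added commentary): caching a single PTE pointer is enough
 * because the FIX_CMAP_BEGIN..FIX_CMAP_END slots are contiguous in the
 * kernel page table; kmap_coherent() below reaches the slot for a given
 * colour purely by pointer arithmetic from kmap_coherent_pte.  This
 * assumes FIX_CMAP_BEGIN is the first fixmap index, as it is in the SH
 * fixmap layout of this era.
 */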
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags;
        pte_t pte;

        inc_preempt_count();

        /* Pick the fixmap slot whose cache colour matches the user address. */
        idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);

        /* Evict any stale TLB entry for the slot before it is reused. */
        local_irq_save(flags);
        flush_tlb_one(get_asid(), vaddr);
        local_irq_restore(flags);

        update_mmu_cache(NULL, vaddr, pte);

        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

        return (void *)vaddr;
}
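/*
 * Worked example (added commentary, illustrative figures): with a 16KB
 * direct-mapped dcache and 4KB pages, alias_mask is 0x3000 and there
 * are four cache colours.  For a user address of 0x00403000:
 *
 *      idx   = (0x00403000 & 0x3000) >> 12 = 3
 *      vaddr = __fix_to_virt(FIX_CMAP_END - 3)
 *
 * Provided the base of the CMAP window is colour-aligned, vaddr shares
 * its colour bits with the user address, so the temporary kernel
 * mapping indexes the same cache lines as the user mapping and no
 * alias is introduced.
 */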
/*
 * Tear-down is lazy: the fixmap PTE is left in place, and the next
 * kmap_coherent() to reuse the slot flushes the stale TLB entry.  The
 * parameter is the kernel virtual address returned by kmap_coherent();
 * it is currently unused.
 */
static inline void kunmap_coherent(void *kvaddr)
{
        dec_preempt_count();
        preempt_check_resched();
}
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
        __set_bit(PG_mapped, &page->flags);

        clear_page(to);
        if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
                __flush_wback_region(to, PAGE_SIZE);
}
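/*
 * Note (added commentary): clear_page() writes through the cached P1
 * alias of the page.  The XOR above compares the colour bits of the
 * user address with those of the P1 address; only when they differ can
 * the user mapping index different cache lines, and only then must the
 * freshly zeroed lines be written back to memory before the user sees
 * the page.
 */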
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        void *vto;

        __set_bit(PG_mapped, &page->flags);

        /* Write through a mapping whose colour matches the user address. */
        vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
        memcpy(vto, src, len);
        kunmap_coherent(vto);

        /* Executable mappings also need the icache brought into sync. */
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        void *vfrom;

        __set_bit(PG_mapped, &page->flags);

        vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
        memcpy(dst, vfrom, len);
        kunmap_coherent(vfrom);
}
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        __set_bit(PG_mapped, &to->flags);

        vto = kmap_atomic(to, KM_USER1);
        vfrom = kmap_coherent(from, vaddr);
        copy_page(vto, vfrom);
        kunmap_coherent(vfrom);

        if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
                __flush_wback_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);

        /* Make sure this page is copied on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
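/*
 * Note (added commentary): the generic copy_user_highpage() in
 * <linux/highmem.h> is compiled out when an architecture defines
 * __HAVE_ARCH_COPY_USER_HIGHPAGE, which is how this SH-4 version ends
 * up on the copy-on-write path.
 */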
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        pte_clear(mm, addr, ptep);
        if (!pte_not_present(pte)) {
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        struct address_space *mapping = page_mapping(page);

                        /* Anonymous page, or no writable file mappings left. */
                        if (!mapping || !mapping_writably_mapped(mapping))
                                __clear_bit(PG_mapped, &page->flags);
                }
        }
        return pte;
}
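/*
 * Note (added commentary): PG_mapped is the SH-specific "page has been
 * mapped to user space with cache attention" marker set by the clear
 * and copy routines above.  Once the PTE is gone and no address space
 * still has the page writably mapped, the marker is dropped so that a
 * future mapping of the page goes through the cache-handling paths
 * again.
 */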