/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
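
/*
 * Fixmap-based "coherent" kernel mappings of user pages.
 *
 * kmap_get_fixmap_pte() walks the kernel page tables down to the pte that
 * backs a fixmap virtual address.  kmap_coherent_init() runs at boot and
 * caches the pte of the first FIX_CMAP slot in kmap_coherent_pte, so that
 * kmap_coherent() can later install ptes by simple pointer arithmetic
 * instead of re-walking the page tables.  This is only needed on parts
 * with aliasing D-caches (SH-4, SH7705 in 32KB cache mode).
 */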
#define kmap_get_fixmap_pte(vaddr)                                     \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
#endif
}
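
/*
 * kmap_coherent() maps @page at the fixmap slot whose virtual address has
 * the same cache colour as the user address @addr (the slot index comes
 * from the alias bits of @addr), so accesses through the returned mapping
 * hit the same D-cache lines as the user mapping.  The page must not be
 * flagged PG_dcache_dirty.  The old TLB entry for the slot is flushed and
 * the new pte is written through the cached kmap_coherent_pte.  Preemption
 * stays disabled until the matching kunmap_coherent().
 */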
static void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	BUG_ON(test_bit(PG_dcache_dirty, &page->flags));

	inc_preempt_count();

	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(void)
{
	dec_preempt_count();
	preempt_check_resched();
}
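
/*
 * copy_to_user_page() is the arch hook for writing into a page that is
 * also mapped in user space (e.g. ptrace writes via access_process_vm()).
 * With an aliasing D-cache, a mapped and clean page is written through a
 * kmap_coherent() mapping of the user's cache colour so the user-visible
 * lines are updated directly; otherwise the copy goes through the normal
 * kernel mapping and the page is flagged PG_dcache_dirty for a deferred
 * flush.  Executable VMAs additionally get flush_cache_page() to keep the
 * I-cache consistent.
 */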
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
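
/*
 * copy_from_user_page() is the read-side counterpart: a mapped, clean page
 * is read through a coherent mapping so the kernel sees exactly what the
 * user sees; otherwise the read goes through the kernel mapping and the
 * page is flagged PG_dcache_dirty.
 */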
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
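
/*
 * copy_user_highpage() copies one user page into another (e.g. the COW
 * fault path).  The source is read through a coherent mapping when it is
 * mapped and clean; the destination is then written back when its kernel
 * mapping and the target user address fall in different cache colours, so
 * the data reaches memory before the page is mapped at the user address.
 */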
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
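
/*
 * clear_user_highpage() zeroes a page destined for user space and writes
 * the zeroes back to memory when the kernel mapping and the target user
 * address differ in cache colour, so a differently-coloured user mapping
 * does not read stale data.
 */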
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
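
/*
 * __update_cache() runs when a pte is installed (from update_mmu_cache()).
 * If an earlier kernel-side access left the page flagged PG_dcache_dirty
 * and the kernel mapping and the new user address differ in cache colour,
 * the deferred write-back happens here.
 */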
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
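
/*
 * __flush_anon_page() writes back an anonymous page whose user mapping
 * differs in cache colour from its kernel mapping (reached via
 * flush_anon_page(), e.g. from get_user_pages()).  A mapped, clean page is
 * flushed through a coherent mapping of the user's colour; otherwise the
 * kernel address is flushed directly.
 */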
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}