/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
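
/*
 * The SH-4 d-cache is virtually indexed, so two mappings of the same
 * physical page may occupy different cache lines whenever they differ
 * in the index bits above PAGE_SHIFT; alias_mask covers exactly those
 * bits.  kmap_coherent() exploits this: it maps @page at a fixmap slot
 * whose virtual address has the same "colour" (alias bits) as the user
 * address @addr, so kernel-side accesses hit the same cache lines the
 * user mapping will use.  Purely as an illustration (the real value is
 * probed from the cache geometry at boot): a 16 KB cache way with 4 KB
 * pages would give an alias_mask of 0x3000, i.e. four page colours.
 */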
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	/* Pick the fixmap slot whose colour matches the user address */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* Kill any stale translation for the slot before reusing it */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}
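
/*
 * Usage pattern, as a sketch of what the copy_*_user_page() helpers
 * below do: map the page at the colour of its user mapping, access it
 * through that mapping, then drop it:
 *
 *	void *p = kmap_coherent(page, vaddr);
 *	memcpy(p, src, len);
 *	kunmap_coherent(p);
 *
 * Note that kunmap_coherent() ignores its argument (callers hand back
 * the mapped address, not a struct page *); it only pairs the preempt
 * count bump done in kmap_coherent().
 */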

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);

	clear_page(to);
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}
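
/*
 * The XOR test above asks: does the user virtual address land on a
 * different cache colour than the P1 address we just cleared through?
 * Only in that case can dirty lines be stranded where the user mapping
 * will not see them, so only then is the writeback needed.
 */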

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
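
/*
 * The VM_EXEC flush matters because the SH-4 i-cache does not snoop
 * the d-cache: when the kernel writes instructions into a user page
 * (e.g. ptrace installing a breakpoint), the new bytes have to be
 * written back from the d-cache and the stale i-cache lines dropped
 * before userspace executes them.
 */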

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
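
/*
 * copy_user_highpage() is the hook the generic copy-on-write path
 * (cow_user_page() in mm/memory.c) uses to duplicate a page.  Note
 * that the destination, not the source, gets PG_mapped and the alias
 * writeback: it is the new page that is about to be mapped at @vaddr.
 */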

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);

			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}
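
/*
 * PG_mapped bookkeeping, in short: the helpers above set the bit once
 * a page has been made coherent for a user mapping, and the arch's
 * update_mmu_cache() (elsewhere in arch/sh/mm) can skip its flush when
 * the bit is already set.  Clearing it here, when a PTE goes away and
 * no writably shared mapping remains, forces a fresh writeback the
 * next time the page is mapped into userspace.
 */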