/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2005  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
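
/*
 * p3map_mutex[] (defined elsewhere in the SH mm code) serializes use of
 * the per-colour P3 mapping windows below: every page whose user address
 * shares the same alias bits maps through the same window.
 */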
extern struct mutex p3map_mutex[];
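
/*
 * The SH-4 D-cache is virtually indexed: CACHE_ALIAS masks the index
 * bits above the page offset, so two mappings of one physical page are
 * cache-coherent only when these bits agree.
 */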
#define CACHE_ALIAS (cpu_data->dcache.alias_mask)

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
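	/*
	 * Record that the page now backs a user mapping in a known-clean
	 * cache state, so the TLB refill path can skip its writeback.
	 */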
	__set_bit(PG_mapped, &page->flags);
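
	/*
	 * If the kernel (P1) address and the user address fall in the
	 * same cache colour they share cache lines, and a plain
	 * clear_page() is safe.  Otherwise clear through a congruent
	 * temporary mapping in P3 instead.
	 */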
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
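		/*
		 * One mutex per page-sized alias slot (12 == PAGE_SHIFT
		 * here): map the page at the congruent P3 address, evict
		 * any stale TLB entry for that address, clear the page
		 * through the window, then unmap it again.
		 */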
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
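
	/*
	 * Same aliasing test as in clear_user_page(): copy directly when
	 * the destination shares the user address's cache colour, or via
	 * a congruent P3 window when it does not.
	 */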
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
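		/* Map, flush any stale TLB entry, copy, unmap -- as above. */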
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear: in
 * addition to clearing the PTE, it drops PG_mapped when the page is not
 * part of a writably shared mapping, so a later user mapping will flush
 * the cache again.
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
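
	/*
	 * If the old PTE referenced a real page that is not writably
	 * shared through its address_space, clear PG_mapped so the next
	 * user mapping forces a fresh cache writeback.
	 */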
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);

			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}

	return pte;
}