/* pg-sh7705.c */
/*
 * arch/sh/mm/pg-sh7705.c
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka
 * Copyright (C) 2004 Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
  12. #include <linux/init.h>
  13. #include <linux/mman.h>
  14. #include <linux/mm.h>
  15. #include <linux/threads.h>
  16. #include <linux/fs.h>
  17. #include <asm/addrspace.h>
  18. #include <asm/page.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/processor.h>
  21. #include <asm/cache.h>
  22. #include <asm/io.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/pgalloc.h>
  25. #include <asm/mmu_context.h>
  26. #include <asm/cacheflush.h>
/*
 * __flush_purge_virtual_region - purge D-cache lines for a virtual
 * region whose backing page lives at P1 address @p1.
 *
 * @p1:   P1 (cached) kernel address of the backing page
 * @virt: virtual address of the region whose cache lines are purged
 * @size: region length in bytes
 *
 * Walks the operand-cache address array directly, one L1 line at a
 * time, so it works even when no TLB entry exists for @virt.  For each
 * line it probes every cache way; when a way's stored physical tag
 * matches the physical address of the corresponding @p1 line, the
 * valid and updated (dirty) bits are cleared, discarding the line
 * without write-back.
 */
static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long p1_begin;

	begin = L1_CACHE_ALIGN((unsigned long)virt);
	end = L1_CACHE_ALIGN((unsigned long)virt + size);

	/* Round the P1 pointer down to a cache-line boundary so it
	 * advances in lockstep with v below. */
	p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1);

	/* do this the slow way as we may not have TLB entries
	 * for virt yet. */
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		unsigned long p;
		unsigned long ways, addr;

		/* Physical address of the backing line; compared against
		 * the physical tag stored in each way. */
		p = __pa(p1_begin);

		ways = current_cpu_data.dcache.ways;
		addr = CACHE_OC_ADDRESS_ARRAY;

		do {
			unsigned long data;

			/* Select the set for this line within the current
			 * way.  NOTE(review): OR-ing accumulates set bits
			 * across way iterations rather than re-masking;
			 * the entry bits of v are constant inside this
			 * loop and ways are selected via way_incr below,
			 * so this appears intentional — confirm against
			 * the SH7705 cache address-array layout. */
			addr |= (v & current_cpu_data.dcache.entry_mask);

			data = ctrl_inl(addr);
			if ((data & CACHE_PHYSADDR_MASK) ==
			    (p & CACHE_PHYSADDR_MASK)) {
				/* Tag match: drop dirty+valid so the line
				 * is purged without write-back. */
				data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID);
				ctrl_outl(data, addr);
			}

			/* Step to the same set in the next way. */
			addr += current_cpu_data.dcache.way_incr;
		} while (--ways);

		p1_begin += L1_CACHE_BYTES;
	}
}
  57. /*
  58. * clear_user_page
  59. * @to: P1 address
  60. * @address: U0 address to be mapped
  61. */
  62. void clear_user_page(void *to, unsigned long address, struct page *pg)
  63. {
  64. struct page *page = virt_to_page(to);
  65. __set_bit(PG_mapped, &page->flags);
  66. if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
  67. clear_page(to);
  68. __flush_wback_region(to, PAGE_SIZE);
  69. } else {
  70. __flush_purge_virtual_region(to,
  71. (void *)(address & 0xfffff000),
  72. PAGE_SIZE);
  73. clear_page(to);
  74. __flush_wback_region(to, PAGE_SIZE);
  75. }
  76. }
  77. /*
  78. * copy_user_page
  79. * @to: P1 address
  80. * @from: P1 address
  81. * @address: U0 address to be mapped
  82. */
  83. void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
  84. {
  85. struct page *page = virt_to_page(to);
  86. __set_bit(PG_mapped, &page->flags);
  87. if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
  88. copy_page(to, from);
  89. __flush_wback_region(to, PAGE_SIZE);
  90. } else {
  91. __flush_purge_virtual_region(to,
  92. (void *)(address & 0xfffff000),
  93. PAGE_SIZE);
  94. copy_page(to, from);
  95. __flush_wback_region(to, PAGE_SIZE);
  96. }
  97. }
  98. /*
  99. * For SH7705, we have our own implementation for ptep_get_and_clear
  100. * Copied from pg-sh4.c
  101. */
  102. pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
  103. {
  104. pte_t pte = *ptep;
  105. pte_clear(mm, addr, ptep);
  106. if (!pte_not_present(pte)) {
  107. unsigned long pfn = pte_pfn(pte);
  108. if (pfn_valid(pfn)) {
  109. struct page *page = pfn_to_page(pfn);
  110. struct address_space *mapping = page_mapping(page);
  111. if (!mapping || !mapping_writably_mapped(mapping))
  112. __clear_bit(PG_mapped, &page->flags);
  113. }
  114. }
  115. return pte;
  116. }