cache.c

/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
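
/*
 * Write len bytes into a user-mapped page. If the page is currently
 * mapped and its kernel-side cache view is clean, copy through a
 * coherent kernel mapping with the same cache colour as the user
 * address so no D-cache alias is created; otherwise copy through the
 * regular kernel address and flag the page so the alias is resolved
 * later.
 */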
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
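
/*
 * Read len bytes from a user-mapped page, using the same coherent
 * kernel mapping trick as copy_to_user_page() to avoid reading
 * through an aliasing cache line.
 */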
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
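
/*
 * Copy one page to another on behalf of a user mapping (e.g. for a
 * copy-on-write fault). The source is read through a coherent mapping
 * when it is mapped and clean; the destination is written back if its
 * kernel address aliases with the user address it will appear at.
 */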
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
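
/*
 * Zero a page that will be mapped into user space, writing the zeroed
 * lines back if the kernel address aliases with the user address.
 */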
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
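
/*
 * Called when a PTE is installed: if the page was flagged as having a
 * dirty kernel-side D-cache view, write those lines back now so the
 * new user mapping sees consistent data.
 */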
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
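
/*
 * Flush an anonymous page whose kernel address aliases with the user
 * address it is mapped at, going through a coherent mapping when the
 * page is mapped and clean.
 */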
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}
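
/*
 * Hook up the family-specific cache routines at boot. sh4_cache_init()
 * is declared weak so kernels built without SH-4 support still link.
 */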
void __init cpu_cache_init(void)
{
	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}
}