/* Flush dcache and invalidate icache when the dcache is in writeback mode
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
  14. /**
  15. * flush_icache_page - Flush a page from the dcache and invalidate the icache
  16. * @vma: The VMA the page is part of.
  17. * @page: The page to be flushed.
  18. *
  19. * Write a page back from the dcache and invalidate the icache so that we can
  20. * run code from it that we've just written into it
  21. */
  22. void flush_icache_page(struct vm_area_struct *vma, struct page *page)
  23. {
  24. unsigned long start = page_to_phys(page);
  25. mn10300_dcache_flush_page(start);
  26. mn10300_icache_inv_page(start);
  27. }
  28. EXPORT_SYMBOL(flush_icache_page);
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *                           single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part (exclusive).
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.  The range must not cross a page boundary; the walk below resolves
 * only the page containing @start.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables; bail out silently at any level that is not present,
	 * since an unmapped page has nothing in the cache to flush */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	/* take a snapshot of the PTE and drop the kmap immediately; only the
	 * value is needed, not a live mapping of the page-table page */
	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_dcache_flush_range2(addr + off, size);
	mn10300_icache_inv_range2(addr + off, size);
}
  77. /**
  78. * flush_icache_range - Globally flush dcache and invalidate icache for region
  79. * @start: The starting virtual address of the region.
  80. * @end: The ending virtual address of the region.
  81. *
  82. * This is used by the kernel to globally flush some code it has just written
  83. * from the dcache back to RAM and then to globally invalidate the icache over
  84. * that region so that that code can be run on all CPUs in the system.
  85. */
  86. void flush_icache_range(unsigned long start, unsigned long end)
  87. {
  88. unsigned long start_page, end_page;
  89. if (end > 0x80000000UL) {
  90. /* addresses above 0xa0000000 do not go through the cache */
  91. if (end > 0xa0000000UL) {
  92. end = 0xa0000000UL;
  93. if (start >= end)
  94. return;
  95. }
  96. /* kernel addresses between 0x80000000 and 0x9fffffff do not
  97. * require page tables, so we just map such addresses
  98. * directly */
  99. start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
  100. mn10300_dcache_flush_range(start_page, end);
  101. mn10300_icache_inv_range(start_page, end);
  102. if (start_page == start)
  103. return;
  104. end = start_page;
  105. }
  106. start_page = start & PAGE_MASK;
  107. end_page = end & PAGE_MASK;
  108. if (start_page == end_page) {
  109. /* the first and last bytes are on the same page */
  110. flush_icache_page_range(start, end);
  111. } else if (start_page + 1 == end_page) {
  112. /* split over two virtually contiguous pages */
  113. flush_icache_page_range(start, end_page);
  114. flush_icache_page_range(end_page, end);
  115. } else {
  116. /* more than 2 pages; just flush the entire cache */
  117. mn10300_dcache_flush();
  118. mn10300_icache_inv();
  119. }
  120. }
  121. EXPORT_SYMBOL(flush_icache_range);