/*
 * include/asm-sh/cpu-sh3/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for the icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for the icache
 *
 * Caches are indexed (effectively) by physical address on SH-3, so
 * none of these flushes are needed and they can be no-ops.
 */
#if defined(CONFIG_SH7705_CACHE_32KB)
/* The SH7705 is an SH-3 processor with a 32KB cache. Like the SH-4 it has
 * cache alias issues; unlike the SH-4 its cache is unified, so we need to do
 * some work in mmap when 'exec'ing a new binary.
 */
/* With a 32KB cache and 4KB pages, aliasing is decided by bit 12. */
#define CACHE_ALIAS 0x00001000
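
/*
 * Illustrative sketch, not part of the original header: the helper below is a
 * hypothetical example of how the CACHE_ALIAS bit is used. Two virtual
 * mappings of the same physical page can hold separate copies in the cache
 * only when they disagree in bit 12 (i.e. they select different cache
 * colours).
 */
static inline int example_mappings_would_alias(unsigned long vaddr1,
					       unsigned long vaddr2)
{
	/* Non-zero when the two addresses index different cache colours. */
	return ((vaddr1 ^ vaddr2) & CACHE_ALIAS) != 0;
}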
struct page;
struct mm_struct;
struct vm_area_struct;

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
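
/*
 * Illustrative usage sketch, not part of the original header; the function
 * name below is hypothetical. Code that writes instructions into memory is
 * expected to make the icache coherent with the dcache over that range before
 * executing it, which is what flush_icache_range() is for.
 */
static inline void example_sync_icache_after_patch(unsigned long start,
						   unsigned long len)
{
	/* Invalidate stale icache lines covering the patched range. */
	flush_icache_range(start, start + len);
}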
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

/* SH-3 has a unified cache, so no special action is needed here. */
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

#define p3_cache_init() do { } while (0)

#define PG_mapped PG_arch_1

/* We provide our own get_unmapped_area() to avoid cache alias issues. */
#define HAVE_ARCH_UNMAPPED_AREA
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define p3_cache_init() do { } while (0)
#define HAVE_ARCH_UNMAPPED_AREA
#endif

#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */