123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255 |
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
- #include <asm/cacheflush.h>
- #include <linux/cache.h>
- #include <asm/cpuinfo.h>
- /* Exported functions */
/*
 * Enable the instruction cache by setting the ICE bit in the MSR.
 * No-op when the CPU was configured without an instruction cache
 * (cpuinfo.use_icache clear).
 */
void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		/* CPU has msrset/msrclr: set MSR_ICE in one instruction. */
		__asm__ __volatile__ ("	\
				msrset	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory");
#else
		/* No MSR instructions: read-modify-write rmsr via the
		 * r12 scratch register (clobber declared below). */
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
#endif
	}
}
/*
 * Disable the instruction cache by clearing the ICE bit in the MSR.
 * No-op when the CPU has no instruction cache configured.
 */
void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		/* msrclr clears MSR_ICE atomically. */
		__asm__ __volatile__ ("	\
				msrclr	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory");
#else
		/* Read-modify-write rmsr, masking off MSR_ICE (~%0 is
		 * folded at compile time since %0 is an "i" constant). */
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
#endif
	}
}
/*
 * Invalidate the single icache line containing @addr using the
 * "wic" (write to instruction cache) instruction.  Guarded so the
 * instruction is never issued on CPUs built without an icache.
 */
void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		__asm__ __volatile__ ("	\
				wic	%0, r0"	\
				:	\
				: "r" (addr));
	}
}
/*
 * Enable the data cache by setting the DCE bit in the MSR.
 * No-op when the CPU was configured without a data cache
 * (cpuinfo.use_dcache clear).
 */
void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		/* CPU has msrset/msrclr: set MSR_DCE in one instruction. */
		__asm__ __volatile__ ("	\
				msrset	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory");
#else
		/* No MSR instructions: read-modify-write rmsr via the
		 * r12 scratch register (clobber declared below). */
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory", "r12");
#endif
	}
}
- void _disable_dcache(void)
- {
- #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
- #else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory", "r12");
- #endif
- }
- void _invalidate_dcache(unsigned int addr)
- {
- __asm__ __volatile__ (" \
- wdc %0, r0" \
- : \
- : "r" (addr));
- }
- void __invalidate_icache_all(void)
- {
- unsigned int i;
- unsigned flags;
- if (cpuinfo.use_icache) {
- local_irq_save(flags);
- __disable_icache();
- /* Just loop through cache size and invalidate, no need to add
- CACHE_BASE address */
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line)
- __invalidate_icache(i);
- __enable_icache();
- local_irq_restore(flags);
- }
- }
- void __invalidate_icache_range(unsigned long start, unsigned long end)
- {
- unsigned int i;
- unsigned flags;
- unsigned int align;
- if (cpuinfo.use_icache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.icache_size, end);
- align = ~(cpuinfo.icache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.icache_line);
- local_irq_save(flags);
- __disable_icache();
- for (i = start; i < end; i += cpuinfo.icache_line)
- __invalidate_icache(i);
- __enable_icache();
- local_irq_restore(flags);
- }
- }
/*
 * Invalidate the icache for one page.  The page's virtual address is
 * not used: with no way to target a single page's lines here, the whole
 * icache is flushed instead (correct, if heavy-handed).
 */
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}
/*
 * Invalidate the icache for @len bytes of a user page at @adr.
 * Like __invalidate_icache_page(), this falls back to flushing the
 * whole icache rather than the specific user range.
 */
void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}
/*
 * Invalidate the icache lines covering a signal trampoline written at
 * @addr.  The 8-byte span presumably covers the trampoline's two
 * 32-bit instructions -- NOTE(review): confirm against the signal
 * setup code that writes the trampoline.
 */
void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}
- void __invalidate_dcache_all(void)
- {
- unsigned int i;
- unsigned flags;
- if (cpuinfo.use_dcache) {
- local_irq_save(flags);
- __disable_dcache();
- /*
- * Just loop through cache size and invalidate,
- * no need to add CACHE_BASE address
- */
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
- __enable_dcache();
- local_irq_restore(flags);
- }
- }
- void __invalidate_dcache_range(unsigned long start, unsigned long end)
- {
- unsigned int i;
- unsigned flags;
- unsigned int align;
- if (cpuinfo.use_dcache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.dcache_size, end);
- align = ~(cpuinfo.dcache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.dcache_line);
- local_irq_save(flags);
- __disable_dcache();
- for (i = start; i < end; i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
- __enable_dcache();
- local_irq_restore(flags);
- }
- }
/*
 * Invalidate the dcache for one page.  The page's virtual address is
 * not used: with no way to target a single page's lines here, the whole
 * dcache is flushed instead (correct, if heavy-handed).
 */
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}
/*
 * Invalidate the dcache for @len bytes of a user page at @adr.
 * Like __invalidate_dcache_page(), this falls back to flushing the
 * whole dcache rather than the specific user range.
 */
void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}
|