/*
 * linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "proc-macros.S"
/*
 * v7_flush_dcache_all()
 *
 * Flush the whole D-cache by walking every cache level reported in
 * CLIDR and cleaning/invalidating each set/way.  Takes no arguments.
 *
 * Corrupted registers: r0-r5, r7, r9-r11
 */
ENTRY(v7_flush_dcache_all)
        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
        ands    r3, r0, #0x7000000      @ extract loc from clidr
        mov     r3, r3, lsr #23         @ loc * 2 (level counter below steps by 2)
        beq     finished                @ if loc is 0, then no need to clean
        mov     r10, #0                 @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask of the bits for current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        isb                             @ isb to sync the new cssr & csidr
        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3      @ find maximum way number (associativity - 1)
        clz     r5, r4                  @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13     @ find maximum set number (number of sets - 1)
loop2:
        mov     r9, r4                  @ create working copy of max way number
loop3:
        orr     r11, r10, r9, lsl r5    @ factor way and cache number into r11
        orr     r11, r11, r7, lsl r2    @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
        subs    r9, r9, #1              @ decrement the way
        bge     loop3
        subs    r7, r7, #1              @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        mov     r10, #0                 @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        isb
        mov     pc, lr
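/*
 * For reference, the set/way walk above corresponds roughly to the C
 * below.  This is an illustrative sketch only, not kernel code: the
 * v7_* helper names are invented here, while the CP15 encodings and the
 * CLIDR/CSSELR/CCSIDR field layouts follow the ARMv7 ARM.  It assumes a
 * GCC ARM toolchain for the inline assembly and __builtin_clz().
 *
 *   static inline unsigned int v7_read_clidr(void)
 *   {
 *           unsigned int v;
 *           asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (v));
 *           return v;
 *   }
 *
 *   static inline void v7_write_csselr(unsigned int v)
 *   {
 *           asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (v));
 *           asm volatile("isb");
 *   }
 *
 *   static inline unsigned int v7_read_ccsidr(void)
 *   {
 *           unsigned int v;
 *           asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (v));
 *           return v;
 *   }
 *
 *   static inline void v7_dccisw(unsigned int setway)
 *   {
 *           asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (setway));
 *   }
 *
 *   static void v7_flush_dcache_all_sketch(void)
 *   {
 *           unsigned int clidr = v7_read_clidr();
 *           unsigned int loc = (clidr >> 24) & 0x7;    // Level of Coherency
 *           unsigned int level;
 *
 *           for (level = 0; level < loc; level++) {
 *                   unsigned int ctype = (clidr >> (level * 3)) & 0x7;
 *                   unsigned int ccsidr, line_shift, max_way, max_set, way_shift;
 *                   int way, set;
 *
 *                   if (ctype < 2)                     // no cache, or I-cache only
 *                           continue;
 *                   v7_write_csselr(level << 1);       // select D/unified cache
 *                   ccsidr = v7_read_ccsidr();
 *                   line_shift = (ccsidr & 0x7) + 4;   // log2(line size in bytes)
 *                   max_way = (ccsidr >> 3) & 0x3ff;   // associativity - 1
 *                   max_set = (ccsidr >> 13) & 0x7fff; // number of sets - 1
 *                   way_shift = max_way ? __builtin_clz(max_way) : 0;
 *
 *                   for (set = max_set; set >= 0; set--)
 *                           for (way = max_way; way >= 0; way--)
 *                                   v7_dccisw((way << way_shift) |
 *                                             (set << line_shift) |
 *                                             (level << 1));
 *           }
 *           v7_write_csselr(0);                        // back to cache level 0
 *   }
 */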
/*
 * v7_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is achieved using atomic clean / invalidates
 * working outwards from the L1 cache. This is done using Set/Way based
 * cache maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(v7_flush_kern_cache_all)
        stmfd   sp!, {r4-r5, r7, r9-r11, lr}
        bl      v7_flush_dcache_all
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ I+BTB cache invalidate
        ldmfd   sp!, {r4-r5, r7, r9-r11, lr}
        mov     pc, lr
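/*
 * In C terms the routine above is simply the D-side set/way flush
 * followed by a whole I-cache invalidate.  A sketch, reusing
 * v7_flush_dcache_all_sketch() from the comment further up (again not
 * kernel code; the assembly comment describes the CP15 write below as
 * an I-cache + BTB invalidate):
 *
 *   static inline void v7_icache_inval_all(void)
 *   {
 *           asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
 *   }
 *
 *   static void v7_flush_kern_cache_all_sketch(void)
 *   {
 *           v7_flush_dcache_all_sketch();      // clean+invalidate all D levels
 *           v7_icache_inval_all();             // then drop the I-side copies
 *   }
 */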
/*
 * v7_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
        /*FALLTHROUGH*/
/*
 * v7_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v7_flush_user_cache_range)
        mov     pc, lr
/*
 * v7_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
        /* FALLTHROUGH */
/*
 * v7_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:      mcr     p15, 0, r0, c7, c11, 1  @ clean D line to the point of unification
        dsb
        mcr     p15, 0, r0, c7, c5, 1   @ invalidate I line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTB
        dsb
        isb
        mov     pc, lr
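/*
 * The loop above is the usual "make newly written code visible to
 * instruction fetch" sequence: clean each D line to the point of
 * unification, invalidate the corresponding I line, then invalidate the
 * branch predictor and apply barriers.  A C sketch (illustrative only;
 * the function name is invented and linesz stands in for the value the
 * dcache_line_size macro computes):
 *
 *   static void v7_coherent_range_sketch(unsigned long start,
 *                                        unsigned long end,
 *                                        unsigned long linesz)
 *   {
 *           unsigned long addr = start & ~(linesz - 1);
 *
 *           for (; addr < end; addr += linesz) {
 *                   // DCCMVAU: clean D line by MVA to PoU
 *                   asm volatile("mcr p15, 0, %0, c7, c11, 1" : : "r" (addr));
 *                   asm volatile("dsb" ::: "memory");
 *                   // ICIMVAU: invalidate I line by MVA to PoU
 *                   asm volatile("mcr p15, 0, %0, c7, c5, 1" : : "r" (addr));
 *           }
 *           // BPIALL: invalidate branch predictor, then synchronise
 *           asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
 *           asm volatile("dsb" ::: "memory");
 *           asm volatile("isb");
 *   }
 */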
/*
 * v7_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr - kernel address (guaranteed to be page aligned)
 */
ENTRY(v7_flush_kern_dcache_page)
        dcache_line_size r2, r3
        add     r1, r0, #PAGE_SZ
1:
        mcr     p15, 0, r0, c7, c14, 1  @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr
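/*
 * A C sketch of the page writeback above (illustrative only; 4096 is
 * assumed as the 4K page size behind PAGE_SZ, and linesz stands in for
 * dcache_line_size):
 *
 *   static void v7_flush_kern_dcache_page_sketch(void *kaddr,
 *                                                unsigned long linesz)
 *   {
 *           unsigned long addr = (unsigned long)kaddr;
 *           unsigned long end = addr + 4096;
 *
 *           for (; addr < end; addr += linesz)
 *                   // DCCIMVAC: clean & invalidate D/unified line to PoC
 *                   asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));
 *           asm volatile("dsb" ::: "memory");
 *   }
 */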
/*
 * v7_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(v7_dma_inv_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        mcrne   p15, 0, r0, c7, c14, 1  @ clean & invalidate D / U line
        tst     r1, r3
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1  @ clean & invalidate D / U line
1:
        mcr     p15, 0, r0, c7, c6, 1   @ invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr
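/*
 * Note the boundary handling above: if the start or end of the region
 * is not cache-line aligned, the partial first/last line may also hold
 * unrelated data, so those lines are cleaned and invalidated (written
 * back first) rather than simply invalidated, which would silently
 * discard the neighbouring data.  A C sketch (illustrative only):
 *
 *   static void v7_dma_inv_range_sketch(unsigned long start,
 *                                       unsigned long end,
 *                                       unsigned long linesz)
 *   {
 *           unsigned long mask = linesz - 1;
 *
 *           if (start & mask)      // partial first line: write back, then drop
 *                   asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r" (start & ~mask));
 *           if (end & mask)        // partial last line: same treatment
 *                   asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r" (end & ~mask));
 *           start &= ~mask;
 *           end &= ~mask;
 *
 *           for (; start < end; start += linesz)
 *                   // DCIMVAC: invalidate D/unified line to PoC
 *                   asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
 *           asm volatile("dsb" ::: "memory");
 *   }
 */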
/*
 * v7_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(v7_dma_clean_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        mcr     p15, 0, r0, c7, c10, 1  @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr
/*
 * v7_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        mcr     p15, 0, r0, c7, c14, 1  @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr
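/*
 * Taken together, the three DMA routines above map onto the usual
 * streaming-DMA directions: buffers the device will read are cleaned
 * (written back) first, buffers the device will write are invalidated,
 * and bidirectional buffers get the combined flush.  A compact C sketch
 * of that choice (illustrative only; the enum and function are invented
 * for the example and ignore the partial-line handling shown earlier):
 *
 *   enum dma_dir_sketch { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };
 *
 *   static void v7_dma_maint_sketch(unsigned long start, unsigned long end,
 *                                   unsigned long linesz,
 *                                   enum dma_dir_sketch dir)
 *   {
 *           unsigned long addr;
 *
 *           for (addr = start & ~(linesz - 1); addr < end; addr += linesz) {
 *                   if (dir == TO_DEVICE)              // clean D/U line
 *                           asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));
 *                   else if (dir == FROM_DEVICE)       // invalidate D/U line
 *                           asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr));
 *                   else                               // clean & invalidate
 *                           asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));
 *           }
 *           asm volatile("dsb" ::: "memory");
 *   }
 */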
        __INITDATA

        .type   v7_cache_fns, #object
ENTRY(v7_cache_fns)
        .long   v7_flush_kern_cache_all
        .long   v7_flush_user_cache_all
        .long   v7_flush_user_cache_range
        .long   v7_coherent_kern_range
        .long   v7_coherent_user_range
        .long   v7_flush_kern_dcache_page
        .long   v7_dma_inv_range
        .long   v7_dma_clean_range
        .long   v7_dma_flush_range
        .size   v7_cache_fns, . - v7_cache_fns
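/*
 * The order of the entries above must match the cpu_cache_fns
 * function-pointer structure that the generic ARM cache glue expects
 * (asm/cacheflush.h).  Roughly the following shape, sketched here for
 * orientation only; the exact member types can differ between kernel
 * versions:
 *
 *   struct cpu_cache_fns_sketch {
 *           void (*flush_kern_all)(void);
 *           void (*flush_user_all)(void);
 *           void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *           void (*coherent_kern_range)(unsigned long, unsigned long);
 *           void (*coherent_user_range)(unsigned long, unsigned long);
 *           void (*flush_kern_dcache_page)(void *);
 *           void (*dma_inv_range)(const void *, const void *);
 *           void (*dma_clean_range)(const void *, const void *);
 *           void (*dma_flush_range)(const void *, const void *);
 *   };
 */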