@@ -60,13 +60,13 @@ static unsigned long scache_size __read_mostly;
 /*
  * Dummy cache handling routines for machines without boardcaches
  */
-static void no_sc_noop(void) {}
+static void cache_noop(void) {}
 
 static struct bcache_ops no_sc_ops = {
-	.bc_enable = (void *)no_sc_noop,
-	.bc_disable = (void *)no_sc_noop,
-	.bc_wback_inv = (void *)no_sc_noop,
-	.bc_inv = (void *)no_sc_noop
+	.bc_enable = (void *)cache_noop,
+	.bc_disable = (void *)cache_noop,
+	.bc_wback_inv = (void *)cache_noop,
+	.bc_inv = (void *)cache_noop
 };
 
 struct bcache_ops *bcops = &no_sc_ops;
@@ -94,7 +94,9 @@ static inline void r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache_page = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page = blast_dcache16_page;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -106,7 +108,9 @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache_page_indexed = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -118,7 +122,9 @@ static inline void r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache = blast_dcache16;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache = blast_dcache32;
@@ -201,7 +207,9 @@ static inline void r4k_blast_icache_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache_page = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
@@ -216,7 +224,9 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache_page_indexed = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 	else if (ic_lsize == 32) {
 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -238,7 +248,9 @@ static inline void r4k_blast_icache_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache = blast_icache16;
 	else if (ic_lsize == 32) {
 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -258,7 +270,7 @@ static inline void r4k_blast_scache_page_setup(void)
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache_page = (void *)no_sc_noop;
+		r4k_blast_scache_page = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache_page = blast_scache16_page;
 	else if (sc_lsize == 32)
@@ -276,7 +288,7 @@ static inline void r4k_blast_scache_page_indexed_setup(void)
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+		r4k_blast_scache_page_indexed = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
 	else if (sc_lsize == 32)
@@ -294,7 +306,7 @@ static inline void r4k_blast_scache_setup(void)
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache = (void *)no_sc_noop;
+		r4k_blast_scache = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache = blast_scache16;
 	else if (sc_lsize == 32)
@@ -508,7 +520,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 	unsigned long end = fir_args->end;
 
 	if (!cpu_has_ic_fills_f_dc) {
-		if (end - start > dcache_size) {
+		if (end - start >= dcache_size) {
 			r4k_blast_dcache();
 		} else {
 			R4600_HIT_CACHEOP_WAR_IMPL;
@@ -683,10 +695,12 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
-	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+	if (dc_lsize)
+		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
 		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
-	protected_flush_icache_line(addr & ~(ic_lsize - 1));
+	if (ic_lsize)
+		protected_flush_icache_line(addr & ~(ic_lsize - 1));
 	if (MIPS4K_ICACHE_REFILL_WAR) {
 		__asm__ __volatile__ (
 			".set push\n\t"
@@ -973,8 +987,10 @@ static void __init probe_pcache(void)
 	c->icache.waysize = icache_size / c->icache.ways;
 	c->dcache.waysize = dcache_size / c->dcache.ways;
 
-	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
-	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+	c->icache.sets = c->icache.linesz ?
+		icache_size / (c->icache.linesz * c->icache.ways) : 0;
+	c->dcache.sets = c->dcache.linesz ?
+		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 
 	/*
 	 * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
@@ -1259,10 +1275,12 @@ void __init r4k_cache_init(void)
 	 * This code supports virtually indexed processors and will be
 	 * unnecessarily inefficient on physically indexed processors.
 	 */
-	shm_align_mask = max_t( unsigned long,
-				c->dcache.sets * c->dcache.linesz - 1,
-				PAGE_SIZE - 1);
-
+	if (c->dcache.linesz)
+		shm_align_mask = max_t( unsigned long,
+					c->dcache.sets * c->dcache.linesz - 1,
+					PAGE_SIZE - 1);
+	else
+		shm_align_mask = PAGE_SIZE-1;
 	flush_cache_all = r4k_flush_cache_all;
 	__flush_cache_all = r4k___flush_cache_all;
 	flush_cache_mm = r4k_flush_cache_mm;
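
All of the *_setup() hunks above follow the same shape: probe the cache line size, and when it is 0 (the cache is absent) install a shared no-op through the function pointer, so callers never need a guard. Below is a minimal standalone sketch of that dispatch pattern in plain C; the names and printed messages are invented for illustration and this is not code from the kernel tree. Here the no-op and the pointer share a type, so no cast is needed; the kernel casts through (void *) because some of the blast pointers take an address argument while cache_noop takes none.

	/* Sketch only: the kernel probes the size via cpu_dcache_line_size(). */
	#include <stdio.h>

	static void cache_noop(void) {}

	static void (*blast_dcache)(void);

	static void blast_dcache16(void) { puts("writeback+invalidate, 16-byte lines"); }
	static void blast_dcache32(void) { puts("writeback+invalidate, 32-byte lines"); }

	static void blast_dcache_setup(unsigned long dc_lsize)
	{
		if (dc_lsize == 0)
			blast_dcache = cache_noop;	/* no d-cache: calls become no-ops */
		else if (dc_lsize == 16)
			blast_dcache = blast_dcache16;
		else if (dc_lsize == 32)
			blast_dcache = blast_dcache32;
	}

	int main(void)
	{
		blast_dcache_setup(0);	/* cache-less CPU */
		blast_dcache();		/* safe: no guard needed at the call site */

		blast_dcache_setup(32);
		blast_dcache();
		return 0;
	}

The sigtramp and shm_align_mask hunks cover the remaining cases where a line size is used directly rather than through one of these pointers, so a 0 value there must be guarded at the call site instead.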