
ARC: [mm] refactor the core (i|d)cache line ops loops

No semantic changes:
* simplify the alignment code by using the & operation only (see the sketch below)
* rename variables to paddr to make the physical-address intent clear

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
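
For readers skimming the diff below: the "& operation only" point replaces an explicit slack computation with two masking statements. A minimal standalone sketch of the equivalence, where LINE_LEN, the addresses, and the sizes are illustrative stand-ins rather than the kernel's values:

#include <stdio.h>

#define LINE_LEN	64UL			/* illustrative cache line size */
#define LINE_MASK	(~(LINE_LEN - 1))	/* clears the in-line offset bits */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long paddr = 0x1234;	/* not line aligned */
	unsigned long sz = 100;		/* not a line multiple */

	/* old style: compute the slack, then floor start / grow size */
	unsigned long slack = paddr & ~LINE_MASK;
	unsigned long old_start = paddr - slack;
	unsigned long old_sz = sz + slack;

	/* new style: same floor/ceil using & only */
	sz += paddr & ~LINE_MASK;	/* absorb the misalignment into sz */
	paddr &= LINE_MASK;		/* floor paddr to a line boundary */

	printf("start %#lx vs %#lx, lines %lu vs %lu\n",
	       old_start, paddr,
	       DIV_ROUND_UP(old_sz, LINE_LEN), DIV_ROUND_UP(sz, LINE_LEN));
	return 0;
}

Both variants yield the same start address and line count: subtracting the low bits and masking them off are the same floor once the size has been grown by the slack.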
Vineet Gupta · 12 years ago · commit a690984d60

1 file changed, 24 insertions(+), 29 deletions(-)

arch/arc/mm/cache_arc700.c  (+24, -29)

@@ -270,21 +270,20 @@ static inline void __dc_entire_op(const int cacheop)
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * Its sole purpose is to help gcc generate ZOL (zero-overhead loops)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-					  int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long sz,
+				  int aux_reg)
 {
-	int num_lines, slack;
+	int num_lines;
 
 	/* Ensure we properly floor/ceil the non-line aligned/sized requests
-	 * and have @start - aligned to cache line and integral @num_lines.
+	 * and have @paddr - aligned to cache line and integral @num_lines.
 	 * This however can be avoided for page-sized requests since:
-	 *  -@start will be cache-line aligned already (being page aligned)
+	 *  -@paddr will be cache-line aligned already (being page aligned)
 	 *  -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = start & ~DCACHE_LINE_MASK;
-		sz += slack;
-		start -= slack;
+		sz += paddr & ~DCACHE_LINE_MASK;
+		paddr &= DCACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
@@ -298,17 +297,17 @@ static inline void __dc_line_loop(unsigned long start, unsigned long sz,
 		 * doesn't support aliasing configs for D$, yet.
 		 * Thus paddr is enough to provide both tag and index.
 		 */
-		write_aux_reg(ARC_REG_DC_PTAG, start);
+		write_aux_reg(ARC_REG_DC_PTAG, paddr);
 #endif
-		write_aux_reg(aux_reg, start);
-		start += ARC_DCACHE_LINE_LEN;
+		write_aux_reg(aux_reg, paddr);
+		paddr += ARC_DCACHE_LINE_LEN;
 	}
 }
 
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
+static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
 					const int cacheop)
 {
 	unsigned long flags, tmp = tmp;
@@ -332,7 +331,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 	else
 		aux = ARC_REG_DC_FLDL;
 
-	__dc_line_loop(start, sz, aux);
+	__dc_line_loop(paddr, sz, aux);
 
 	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
 		wait_for_flush();
@@ -347,7 +346,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +398,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	unsigned long flags;
-	int num_lines, slack;
-	unsigned int addr;
+	int num_lines;
 
 	/*
 	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
 	 * However page-sized flushes can be compile-time optimised.
-	 *  -@phy_start will be cache-line aligned already (being page aligned)
+	 *  -@paddr will be cache-line aligned already (being page aligned)
 	 *  -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = phy_start & ~ICACHE_LINE_MASK;
-		sz += slack;
-		phy_start -= slack;
+		sz += paddr & ~ICACHE_LINE_MASK;
+		paddr &= ICACHE_LINE_MASK;
+		vaddr &= ICACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-	vaddr &= ICACHE_LINE_MASK;
-	addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
 	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
-	addr = phy_start | ((vaddr >> 13) & 0x1F);
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
 	local_irq_save(flags);
 	while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
 		/* tag comes from phy addr */
-		write_aux_reg(ARC_REG_IC_PTAG, addr);
+		write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
 		/* index bits come from vaddr */
 		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
 		vaddr += ARC_ICACHE_LINE_LEN;
 #else
 		/* paddr contains stuffed vaddr bits */
-		write_aux_reg(ARC_REG_IC_IVIL, addr);
+		write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-		addr += ARC_ICACHE_LINE_LEN;
+		paddr += ARC_ICACHE_LINE_LEN;
 	}
 	local_irq_restore(flags);
 }
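
A note on the MMU v2 path in the last hunk: ARC's default page size is 8K, so PAGE_SHIFT is 13 and (vaddr >> PAGE_SHIFT) & 0x1F extracts exactly bits 17:13 of the virtual address, which land in bits 4:0 of the line-aligned (hence zero there) physical address. A tiny standalone sketch with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT	13	/* ARC default 8K pages; bits 17:13 sit just above */

int main(void)
{
	unsigned long paddr = 0x80004000;	/* line aligned, low 5 bits clear */
	unsigned long vaddr = 0x0002a000;	/* bits 17:13 = 0b10101 = 0x15 */

	/* stuff vaddr bits 17:13 into paddr bits 4:0 */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	printf("stuffed op address = %#lx\n", paddr);	/* prints 0x80004015 */
	return 0;
}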