@@ -0,0 +1,36 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+#include <linux/types.h>
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void atomic_scrub(void *va, u32 size)
+{
+ unsigned long *virt_addr = va;
+ unsigned long temp;
+ u32 i;
+
+ for (i = 0; i < size / sizeof(unsigned long); i++, virt_addr++) {
+
+ /*
+ * Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ *
+ * Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
+ */
+
+ __asm__ __volatile__ (
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_add \n"
|
|
|
+ " ll %0, %1 # atomic_add \n"
+ " addu %0, $0 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (*virt_addr)
+ : "m" (*virt_addr));
+
+ }
+}
+
+#endif /* ASM_EDAC_H */
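
For context, a minimal sketch of how a caller might use atomic_scrub() to
repair a location that the memory controller reported as a corrected error:
rewriting each word in place makes the hardware regenerate clean ECC check
bits. The helper name scrub_corrected_error() and the phys_to_virt() and
L1_CACHE_BYTES choices below are illustrative assumptions, not part of this
patch; the EDAC core's edac_mc_scrub_block() performs a comparable
map-and-scrub sequence on the affected page.

	/*
	 * Illustrative caller sketch (assumed names), not part of this
	 * patch: scrub one cache line around a corrected-error address.
	 */
	#include <linux/cache.h>	/* L1_CACHE_BYTES */
	#include <asm/io.h>		/* phys_to_virt() */
	#include <asm/edac.h>		/* atomic_scrub() */

	static void scrub_corrected_error(unsigned long phys_addr)
	{
		/* Align down so whole words are rewritten. */
		void *va = phys_to_virt(phys_addr & ~(L1_CACHE_BYTES - 1UL));

		atomic_scrub(va, L1_CACHE_BYTES);
	}

Scrubbing a full cache line rather than a single word keeps size a multiple
of sizeof(unsigned long), which the loop in atomic_scrub() assumes.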