@@ -1,32 +1,30 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/module.h>
-
-#include <asm/asm.h>
-#include <asm/i387.h>
-
-
 /*
  *	MMX 3DNow! library helper functions
  *
  *	To do:
- *	We can use MMX just for prefetch in IRQ's. This may be a win. 
+ *	We can use MMX just for prefetch in IRQ's. This may be a win.
  *		(reported so on K6-III)
  *	We should use a better code neutral filler for the short jump
  *		leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
  *	We also want to clobber the filler register so we don't get any
- *		register forwarding stalls on the filler. 
+ *		register forwarding stalls on the filler.
 *
 *	Add *user handling. Checksums are not a win with MMX on any CPU
 *	tested so far for any MMX solution figured.
 *
- *	22/09/2000 - Arjan van de Ven 
- *		Improved for non-egineering-sample Athlons 
+ *	22/09/2000 - Arjan van de Ven
+ *		Improved for non-engineering-sample Athlons
 *
 */
-
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/i387.h>
+#include <asm/asm.h>
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 	void *p;
@@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
-	
-	
-	for(; i>5; i--)
-	{
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from));
+
+	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
 		"1:  prefetch 320(%0)\n"
 		"2:  movq (%0), %%mm0\n"
@@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 
-	for(; i>0; i--)
-	{
+	for ( ; i > 0; i--) {
 		__asm__ __volatile__ (
 		"  movq (%0), %%mm0\n"
 		"  movq 8(%0), %%mm1\n"
@@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"  movq %%mm1, 40(%1)\n"
 		"  movq %%mm2, 48(%1)\n"
 		"  movq %%mm3, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 	/*
-	 * Now do the tail of the block
+	 * Now do the tail of the block:
 	 */
-	__memcpy(to, from, len&63);
+	__memcpy(to, from, len & 63);
 	kernel_fpu_end();
+
 	return p;
 }
+EXPORT_SYMBOL(_mmx_memcpy);
 
 #ifdef CONFIG_MK7
 
@@ -128,13 +127,12 @@ static void fast_clear_page(void *page)
 	int i;
 
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/64;i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"  movntq %%mm0, (%0)\n"
 		"  movntq %%mm0, 8(%0)\n"
@@ -145,14 +143,15 @@ static void fast_clear_page(void *page)
 		"  movntq %%mm0, 48(%0)\n"
 		"  movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=64;
+		page += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
+
 	kernel_fpu_end();
 }
 
@@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from)
 
 	kernel_fpu_begin();
 
-	/* maybe the prefetch stuff can go before the expensive fnsave...
+	/*
+	 * maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
 	 */
-	__asm__ __volatile__ (
+	__asm__ __volatile__(
 		"1: prefetch (%0)\n"
 		"   prefetch 64(%0)\n"
 		"   prefetch 128(%0)\n"
@@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+			_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<(4096-320)/64; i++)
-	{
+	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
-	for(i=(4096-320)/64; i<4096/64; i++)
-	{
+
+	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"2: movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
@@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from)
 		"   movntq %%mm6, 48(%1)\n"
 		"   movq 56(%0), %%mm7\n"
 		"   movntq %%mm7, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+		from += 64;
+		to += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
 	kernel_fpu_end();
 }
 
-#else
+#else /* CONFIG_MK7 */
 
 /*
  *	Generic MMX implementation without K7 specific streaming
  */
-
 static void fast_clear_page(void *page)
 {
 	int i;
-	
+
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/128;i++)
-	{
+	for (i = 0; i < 4096/128; i++) {
 		__asm__ __volatile__ (
 		"  movq %%mm0, (%0)\n"
 		"  movq %%mm0, 8(%0)\n"
@@ -275,8 +270,8 @@ static void fast_clear_page(void *page)
 		"  movq %%mm0, 104(%0)\n"
 		"  movq %%mm0, 112(%0)\n"
 		"  movq %%mm0, 120(%0)\n"
-		: : "r" (page) : "memory");
-		page+=128;
+			: : "r" (page) : "memory");
+		page += 128;
 	}
 
 	kernel_fpu_end();
@@ -285,8 +280,7 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
-	
-	
+
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
@@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+			_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<4096/64; i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 	kernel_fpu_end();
 }
 
-
-#endif
+#endif /* !CONFIG_MK7 */
 
 /*
- *	Favour MMX for page clear and copy.
+ *	Favour MMX for page clear and copy:
 */
-
-static void slow_zero_page(void * page)
+static void slow_zero_page(void *page)
 {
 	int d0, d1;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; stosl" \
-		: "=&c" (d0), "=&D" (d1)
-		:"a" (0),"1" (page),"0" (1024)
-		:"memory");
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; stosl"
+
+			: "=&c" (d0), "=&D" (d1)
+			:"a" (0), "1" (page), "0" (1024)
+			:"memory");
 }
-
-void mmx_clear_page(void * page)
+
+void mmx_clear_page(void *page)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
 }
+EXPORT_SYMBOL(mmx_clear_page);
 
 static void slow_copy_page(void *to, void *from)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; movsl" \
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
-		: "0" (1024),"1" ((long) to),"2" ((long) from) \
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; movsl"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (1024), "1" ((long) to), "2" ((long) from)
 		: "memory");
 }
-
 
 void mmx_copy_page(void *to, void *from)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
 }
-
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
 EXPORT_SYMBOL(mmx_copy_page);
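
A note on the ".fixup" blocks touched above, for anyone who has not seen
the trick: the 3DNow! prefetch can fault (it is undefined on CPUs without
3DNow!, and early Athlon samples had prefetch errata), so the
_ASM_EXTABLE(1b, 3b) entry routes such a fault to label 3, which patches
the faulting instruction itself into a short forward jump and resumes at
label 2 -- every later pass then skips the prefetches entirely. The magic
immediates are just the little-endian encoding of that jump. A minimal
userspace sketch, assuming nothing beyond the C standard library (the
helper name decode_fixup_imm is made up for illustration), that decodes
them:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical helper, not part of the patch: decode a 16-bit immediate
 * stored by "movw $..., 1b" in the fixup code above.  The store is
 * little-endian, so the first byte written over the prefetch is 0xEB
 * (the x86 "jmp rel8" opcode) and the second byte is the displacement,
 * i.e. how many bytes past the end of the 2-byte jmp execution resumes.
 */
static void decode_fixup_imm(uint16_t imm)
{
	uint8_t opcode = imm & 0xff;	/* 0xEB: jmp rel8 */
	uint8_t disp = imm >> 8;	/* bytes skipped after the jmp */

	printf("movw $0x%04X -> %02X %02X: jmp over next %u bytes\n",
	       imm, opcode, disp, disp);
}

int main(void)
{
	decode_fixup_imm(0x1AEB);	/* "jmp on 26 bytes" fixup */
	decode_fixup_imm(0x05EB);	/* "jmp on 5 bytes" fixup */
	return 0;
}

The displacements line up with the comments in the code: a 2-byte jmp plus
26 bytes covers the 28-byte block of prefetches, and 2 plus 5 covers the
remainder of the single 7-byte "prefetch 320(%0)" instruction.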
|