@@ -3,6 +3,8 @@
 
 #include <asm/asm.h>
 
+#include <linux/kernel.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
@@ -38,6 +40,8 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
+extern void load_gs_index(unsigned);
+
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -72,6 +76,112 @@ static inline unsigned long get_limit(unsigned long segment)
 		:"=r" (__limit):"r" (segment));
 	return __limit+1;
 }
+
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads/stores around it, which can hurt performance. The solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization.
+ */
+static unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
+	 * exists, so it will never fail. */
+#ifdef CONFIG_X86_32
+	asm volatile("1: mov %%cr4, %0		\n"
+		     "2:			\n"
+		     ".section __ex_table,\"a\"	\n"
+		     ".long 1b,2b		\n"
+		     ".previous			\n"
+		     : "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+	val = native_read_cr4();
+#endif
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define read_cr0()	(native_read_cr0())
+#define write_cr0(x)	(native_write_cr0(x))
+#define read_cr2()	(native_read_cr2())
+#define write_cr2(x)	(native_write_cr2(x))
+#define read_cr3()	(native_read_cr3())
+#define write_cr3(x)	(native_write_cr3(x))
+#define read_cr4()	(native_read_cr4())
+#define read_cr4_safe()	(native_read_cr4_safe())
+#define write_cr4(x)	(native_write_cr4(x))
+#define wbinvd()	(native_wbinvd())
+
+/* Clear the 'TS' bit */
+#define clts()		(native_clts())
+
+#endif /* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(8 | read_cr0())
+
 #endif /* __KERNEL__ */
 
 static inline void clflush(void *__p)
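
A note on the __force_order trick in the hunk above: the read accessors name __force_order as an "=m" output and the write accessors name it as an "m" input, so the compiler sees a data dependency between the asm statements and keeps them in program order without paying for a full "memory" clobber. The sketch below is not part of the patch (sketch_enable_cr4_bits is a made-up name); it just shows the new accessors in the kind of read-modify-write where that ordering matters.

/* Illustrative only: a CR4 read-modify-write built on the new accessors.
 * The value dependency through "cr4" plus the __force_order operands keep
 * write_cr4() from being scheduled ahead of read_cr4(). */
static inline void sketch_enable_cr4_bits(unsigned long mask)
{
	unsigned long cr4 = read_cr4();

	cr4 |= mask;		/* e.g. X86_CR4_PGE to enable global pages */
	write_cr4(cr4);
}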
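
The CONFIG_X86_32 branch of native_read_cr4_safe() leans on the exception-table fixup: if the CPU has no CR4 register, the "mov %%cr4" at label 1 faults, the fault handler finds the address pair in __ex_table and resumes at label 2, and the "0" (0) input constraint leaves val holding 0. A hedged usage sketch, not taken from the patch (the names here are hypothetical):

/* Illustrative only: save a CR4 image without oopsing on CPUs that have
 * no CR4 at all.  A result of 0 means either "no CR4" or "no bits set". */
static unsigned long sketch_saved_cr4;

static void sketch_save_cr4(void)
{
	sketch_saved_cr4 = read_cr4_safe();
}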
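
Finally, the clts()/stts() pair and the CONFIG_PARAVIRT block: with paravirt enabled these names come from asm/paravirt.h, otherwise they map straight to the native_* helpers, and stts() sets CR0.TS by or-ing in 8 (bit 3). A minimal sketch of the intended pairing, again not taken from the patch (the callback name is hypothetical):

/* Illustrative only: CR0.TS gates FPU/SSE use.  Clearing it with clts()
 * lets the kernel touch FPU state without a device-not-available (#NM)
 * fault; stts() sets it again, i.e. write_cr0(8 | read_cr0()), so the next
 * FPU instruction traps and lazy FPU switching keeps working. */
static inline void sketch_with_fpu(void (*touch_fpu_state)(void))
{
	clts();			/* clear CR0.TS */
	touch_fpu_state();	/* hypothetical FPU-touching callback */
	stts();			/* set CR0.TS back */
}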