@@ -9,6 +9,10 @@
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/errno.h>
+
 static inline unsigned long long native_read_tscp(int *aux)
 {
 	unsigned long low, high;
@@ -17,37 +21,36 @@ static inline unsigned long long native_read_tscp(int *aux)
 	return low | ((u64)high << 32);
 }
 
-#define rdtscp(low, high, aux) \
-	do {							\
-		unsigned long long _val = native_read_tscp(&(aux));	\
-		(low) = (u32)_val;				\
-		(high) = (u32)(_val >> 32);			\
-	} while (0)
-
-#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
-#endif
+/*
+ * The i386 calling convention returns a 64-bit value in edx:eax,
+ * while x86_64 returns it in rax. Also, the "A" constraint does
+ * not really mean rdx:rax on x86_64, so we need specialized
+ * behaviour for each architecture.
+ */
+#ifdef CONFIG_X86_64
+#define DECLARE_ARGS(val, low, high)	unsigned low, high
+#define EAX_EDX_VAL(val, low, high)	(low | ((u64)(high) << 32))
+#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
+#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
+#else
+#define DECLARE_ARGS(val, low, high)	unsigned long long val
+#define EAX_EDX_VAL(val, low, high)	(val)
+#define EAX_EDX_ARGS(val, low, high)	"A" (val)
+#define EAX_EDX_RET(val, low, high)	"=A" (val)
+#endif
 #endif
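+
+/*
+ * Illustrative expansion (editor's sketch, not part of the original
+ * patch): with CONFIG_X86_64 set, rdmsr leaves the low word in eax
+ * and the high word in edx, so a read built from these macros
+ * behaves like:
+ *
+ *	unsigned low, high;
+ *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
+ *	return low | ((u64)high << 32);
+ *
+ * while on i386 the "A" constraint already names the edx:eax pair:
+ *
+ *	unsigned long long val;
+ *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+ *	return val;
+ */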
 
-#ifdef __i386__
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <asm/asm.h>
-#include <asm/errno.h>
-
 static inline unsigned long long native_read_msr(unsigned int msr)
 {
-	unsigned long long val;
+	DECLARE_ARGS(val, low, high);
 
-	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
-	return val;
+	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	return EAX_EDX_VAL(val, low, high);
 }
 
 static inline unsigned long long native_read_msr_safe(unsigned int msr,
 						      int *err)
 {
-	unsigned long long val;
+	DECLARE_ARGS(val, low, high);
 
 	asm volatile("2: rdmsr ; xor %0,%0\n"
 		     "1:\n\t"
@@ -58,10 +61,9 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 		     _ASM_ALIGN "\n\t"
 		     _ASM_PTR " 2b,3b\n\t"
 		     ".previous"
-		     : "=r" (*err), "=A" (val)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), "i" (-EFAULT));
-
-	return val;
+	return EAX_EDX_VAL(val, low, high);
 }
 
 static inline void native_write_msr(unsigned int msr,
@@ -91,16 +93,18 @@ static inline int native_write_msr_safe(unsigned int msr,
 
 static inline unsigned long long native_read_tsc(void)
 {
-	unsigned long long val;
-	asm volatile("rdtsc" : "=A" (val));
-	return val;
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
+	return EAX_EDX_VAL(val, low, high);
 }
 
 static inline unsigned long long native_read_pmc(int counter)
 {
-	unsigned long long val;
-	asm volatile("rdpmc" : "=A" (val) : "c" (counter));
-	return val;
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
+	return EAX_EDX_VAL(val, low, high);
 }
 
 #ifdef CONFIG_PARAVIRT
@@ -128,7 +132,8 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 #define rdmsrl(msr,val)						\
 	((val) = native_read_msr(msr))
 
-#define wrmsrl(msr, val) native_write_msr(msr, (u32)val, (u32)(val >> 32))
+#define wrmsrl(msr, val)						\
+	native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32))
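+/*
+ * Editor's note (illustration, not part of the original patch): the
+ * extra parentheses and u64 casts matter once 'val' is an expression
+ * or a 32-bit type. For example:
+ *
+ *	int shift = 2;
+ *	u64 v = 1;
+ *	wrmsrl(MSR_EFER, v << shift);
+ *
+ * With the old macro the low half expanded to (u32)v << shift, which
+ * truncates v before shifting; the new form evaluates the whole
+ * expression as a u64 before splitting it into halves.
+ */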
 
 /* wrmsr with exception handling */
 static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -160,104 +165,25 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 		(low) = (u32)_l;					\
 		(high) = (u32)(_l >> 32);				\
 	} while(0)
-#endif /* !CONFIG_PARAVIRT */
-
-#endif /* ! __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
-#else /* __i386__ */
-
-#ifndef __ASSEMBLY__
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-	__asm__ __volatile__("rdmsr" \
-			     : "=a" (val1), "=d" (val2) \
-			     : "c" (msr))
-
-
-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-	__asm__ __volatile__("rdmsr" \
-			     : "=a" (a__), "=d" (b__) \
-			     : "c" (msr)); \
-	val = a__ | (b__<<32); \
-} while(0)
-
-#define wrmsr(msr,val1,val2) \
-	__asm__ __volatile__("wrmsr" \
-			     : /* no outputs */ \
-			     : "c" (msr), "a" (val1), "d" (val2))
-
-#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+#define rdtscp(low, high, aux) \
+	do {							\
+		unsigned long long _val = native_read_tscp(&(aux));	\
+		(low) = (u32)_val;				\
+		(high) = (u32)(_val >> 32);			\
+	} while (0)
 
-#define rdtsc(low,high) \
-	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
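+
+/*
+ * Usage sketch (editor's illustration, not part of the original
+ * patch): rdtscp() hands back the TSC split into 32-bit halves plus
+ * the TSC_AUX value, while rdtscpll() keeps the count in one u64:
+ *
+ *	unsigned low, high;
+ *	unsigned long long tsc;
+ *	int aux;
+ *
+ *	rdtscp(low, high, aux);
+ *	rdtscpll(tsc, aux);
+ */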
 
-#define rdtscl(low) \
-	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+#endif /* !CONFIG_PARAVIRT */
 
 
-#define rdtscll(val) do { \
-	unsigned int __a,__d; \
-	__asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
-	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
 
-#define rdpmc(counter,low,high) \
-	__asm__ __volatile__("rdpmc" \
-			     : "=a" (low), "=d" (high) \
-			     : "c" (counter))
-
-
-#ifdef __KERNEL__
-
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
-	asm volatile("2: wrmsr ; xorl %0,%0\n" \
-		     "1:\n\t" \
-		     ".section .fixup,\"ax\"\n\t" \
-		     "3: movl %4,%0 ; jmp 1b\n\t" \
-		     ".previous\n\t" \
-		     ".section __ex_table,\"a\"\n" \
-		     " .align 8\n\t" \
-		     " .quad 2b,3b\n\t" \
-		     ".previous" \
-		     : "=a" (ret__) \
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-	ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-	({ int ret__; \
-	asm volatile ("1: rdmsr\n" \
-		      "2:\n" \
-		      ".section .fixup,\"ax\"\n" \
-		      "3: movl %4,%0\n" \
-		      " jmp 2b\n" \
-		      ".previous\n" \
-		      ".section __ex_table,\"a\"\n" \
-		      " .align 8\n" \
-		      " .quad 1b,3b\n" \
-		      ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
-		      :"c"(msr), "i"(-EIO), "0"(0)); \
-	ret__; })
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* !__i386__ */
-
-#ifndef __ASSEMBLY__
-
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
@@ -281,7 +207,8 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	return wrmsr_safe(msr_no, l, h);
 }
 #endif /* CONFIG_SMP */
-#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
 
 #endif