#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <linux/kernel.h>

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif
#ifdef __KERNEL__

#define _set_base(addr, base) do { unsigned long __pr; \
	__asm__ __volatile__ ("movw %%dx,%1\n\t" \
		"rorl $16,%%edx\n\t" \
		"movb %%dl,%2\n\t" \
		"movb %%dh,%3" \
		:"=&d" (__pr) \
		:"m" (*((addr)+2)), \
		 "m" (*((addr)+4)), \
		 "m" (*((addr)+7)), \
		 "0" (base) \
		); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
	__asm__ __volatile__ ("movw %%dx,%1\n\t" \
		"rorl $16,%%edx\n\t" \
		"movb %2,%%dh\n\t" \
		"andb $0xf0,%%dh\n\t" \
		"orb %%dh,%%dl\n\t" \
		"movb %%dl,%2" \
		:"=&d" (__lr) \
		:"m" (*(addr)), \
		 "m" (*((addr)+6)), \
		 "0" (limit) \
		); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)), (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)), ((limit)-1))
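
/*
 * Illustrative sketch, not part of the original header: set_base() and
 * set_limit() patch the scattered base/limit fields of an 8-byte segment
 * descriptor in place.  The raw-byte descriptor type and helper below are
 * made up for the example; real callers hand in LDT/TSS entries.
 */
struct example_desc { unsigned char bytes[8]; };

static inline void example_fill_desc(struct example_desc *d,
				     unsigned long base, unsigned long size)
{
	set_base(*d, base);	/* base 15:0 -> bytes 2-3, 23:16 -> byte 4,
				   31:24 -> byte 7 */
	set_limit(*d, size);	/* stores size-1: limit 15:0 -> bytes 0-1,
				   19:16 -> low nibble of byte 6 */
}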

extern void load_gs_index(unsigned);

/*
 * Load a segment.  Fall back on loading the zero segment if something
 * goes wrong.  (A usage sketch follows savesegment() below.)
 */
#define loadsegment(seg, value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %k0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"movl %k1, %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		_ASM_ALIGN "\n\t" \
		_ASM_PTR " 1b,3b\n" \
		".previous" \
		: :"r" (value), "r" (0))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
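
/*
 * Illustrative sketch, not part of the original header: the usual
 * save/reload pattern for a data segment register, roughly what the
 * context-switch code does with %fs/%gs.  The function and its arguments
 * are made up for the example.
 */
static inline void example_switch_fs(unsigned int *saved, unsigned int next)
{
	savesegment(fs, *saved);	/* stash the outgoing selector */
	loadsegment(fs, next);		/* install the new one; the fixup
					   path falls back to the zero
					   selector if 'next' is invalid */
}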

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile ("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance.  The solution is
 * to use a variable and mimic reads and writes to it to enforce
 * serialization.  (A usage sketch follows the CR3 accessors below.)
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}
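
/*
 * Illustrative sketch, not part of the original header: writing CR3 back
 * to itself flushes the non-global TLB entries, and the shared
 * __force_order operand is what keeps the read and the write in program
 * order without a full "memory" clobber.
 */
static inline void example_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}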

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/*
	 * This could fault if %cr4 does not exist.  On x86_64, %cr4 always
	 * exists, so it will never fail.
	 */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0 \n"
		     "2: \n"
		     ".section __ex_table,\"a\" \n"
		     ".long 1b,2b \n"
		     ".previous \n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
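
/*
 * Illustrative sketch, not part of the original header: probing a CR4
 * feature bit with the _safe variant, which reads back the 0 it was
 * seeded with on 486-class CPUs that have no CR4 instead of faulting.
 * Bit 5 is the architectural PAE enable bit; the helper name is made up.
 */
static inline int example_pae_enabled(void)
{
	return (native_read_cr4_safe() & (1UL << 5)) != 0;
}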

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())
#endif /* CONFIG_PARAVIRT */

/* Set the 'TS' bit again (CR0 bit 3, hence the 8) */
#define stts()		write_cr0(8 | read_cr0())
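
/*
 * Illustrative sketch, not part of the original header: clts()/stts()
 * bracket code that touches FPU/SSE state in the lazy-FPU scheme.  The
 * body is a stand-in; real code goes through kernel_fpu_begin()/_end().
 */
static inline void example_fpu_section(void)
{
	clts();			/* clear CR0.TS so FPU insns don't fault */
	/* ... use FPU/SSE registers here ... */
	stts();			/* set CR0.TS again so the next FPU use
				   traps and state can be restored lazily */
}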

#endif /* __KERNEL__ */

static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
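
/*
 * Illustrative sketch, not part of the original header: clflush() evicts
 * a single cache line, so flushing a buffer means walking it at the
 * cache-line stride.  The 64-byte stride is an assumption; real code takes
 * the line size from CPUID and fences the loop (e.g. with mfence).
 */
static inline void example_clflush_range(void *start, unsigned long size)
{
	unsigned long p = (unsigned long)start & ~63UL;
	unsigned long end = (unsigned long)start + size;

	for (; p < end; p += 64)
		clflush((void *)p);
}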

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);

#endif /* _ASM_X86_SYSTEM_H_ */