/*
 * desc_32.h — i386 descriptor table (GDT/IDT/LDT/TSS) handling.
 */
  1. #ifndef __ARCH_DESC_H
  2. #define __ARCH_DESC_H
  3. #include <asm/ldt.h>
  4. #include <asm/segment.h>
  5. #include <asm/desc_defs.h>
  6. #ifndef __ASSEMBLY__
  7. #include <linux/preempt.h>
  8. #include <linux/smp.h>
  9. #include <linux/percpu.h>
  10. struct gdt_page
  11. {
  12. struct desc_struct gdt[GDT_ENTRIES];
  13. } __attribute__((aligned(PAGE_SIZE)));
  14. DECLARE_PER_CPU(struct gdt_page, gdt_page);
  15. static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
  16. {
  17. return per_cpu(gdt_page, cpu).gdt;
  18. }
  19. extern void set_intr_gate(unsigned int irq, void * addr);
  20. static inline void pack_descriptor(struct desc_struct *desc,
  21. unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
  22. {
  23. desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
  24. desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
  25. (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
  26. desc->p = 1;
  27. }
  28. static inline void pack_gate(gate_desc *gate,
  29. unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
  30. {
  31. gate->a = (seg << 16) | (base & 0xffff);
  32. gate->b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
  33. }
  34. #ifdef CONFIG_PARAVIRT
  35. #include <asm/paravirt.h>
  36. #else
  37. #define load_TR_desc() native_load_tr_desc()
  38. #define load_gdt(dtr) native_load_gdt(dtr)
  39. #define load_idt(dtr) native_load_idt(dtr)
  40. #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
  41. #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
  42. #define store_gdt(dtr) native_store_gdt(dtr)
  43. #define store_idt(dtr) native_store_idt(dtr)
  44. #define store_tr(tr) (tr = native_store_tr())
  45. #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
  46. #define load_TLS(t, cpu) native_load_tls(t, cpu)
  47. #define set_ldt native_set_ldt
  48. #define write_ldt_entry(dt, entry, desc) \
  49. native_write_ldt_entry(dt, entry, desc)
  50. #define write_gdt_entry(dt, entry, desc, type) \
  51. native_write_gdt_entry(dt, entry, desc, type)
  52. #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
  53. #endif
  54. static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
  55. const void *desc)
  56. {
  57. memcpy(&ldt[entry], desc, sizeof(struct desc_struct));
  58. }
  59. static inline void native_write_idt_entry(gate_desc *idt, int entry,
  60. const gate_desc *gate)
  61. {
  62. memcpy(&idt[entry], gate, sizeof(*gate));
  63. }
  64. static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
  65. const void *desc, int type)
  66. {
  67. memcpy(&gdt[entry], desc, sizeof(struct desc_struct));
  68. }
  69. static inline void write_dt_entry(struct desc_struct *dt,
  70. int entry, u32 entry_low, u32 entry_high)
  71. {
  72. dt[entry].a = entry_low;
  73. dt[entry].b = entry_high;
  74. }
  75. static inline void native_set_ldt(const void *addr, unsigned int entries)
  76. {
  77. if (likely(entries == 0))
  78. __asm__ __volatile__("lldt %w0"::"q" (0));
  79. else {
  80. unsigned cpu = smp_processor_id();
  81. ldt_desc ldt;
  82. pack_descriptor(&ldt, (unsigned long)addr,
  83. entries * sizeof(struct desc_struct) - 1,
  84. DESC_LDT, 0);
  85. write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
  86. &ldt, DESC_LDT);
  87. __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
  88. }
  89. }
  90. static inline void native_load_tr_desc(void)
  91. {
  92. asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
  93. }
  94. static inline void native_load_gdt(const struct desc_ptr *dtr)
  95. {
  96. asm volatile("lgdt %0"::"m" (*dtr));
  97. }
  98. static inline void native_load_idt(const struct desc_ptr *dtr)
  99. {
  100. asm volatile("lidt %0"::"m" (*dtr));
  101. }
  102. static inline void native_store_gdt(struct desc_ptr *dtr)
  103. {
  104. asm ("sgdt %0":"=m" (*dtr));
  105. }
  106. static inline void native_store_idt(struct desc_ptr *dtr)
  107. {
  108. asm ("sidt %0":"=m" (*dtr));
  109. }
  110. static inline unsigned long native_store_tr(void)
  111. {
  112. unsigned long tr;
  113. asm ("str %0":"=r" (tr));
  114. return tr;
  115. }
  116. static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
  117. {
  118. unsigned int i;
  119. struct desc_struct *gdt = get_cpu_gdt_table(cpu);
  120. for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
  121. gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
  122. }
  123. static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
  124. {
  125. gate_desc g;
  126. pack_gate(&g, (unsigned long)addr, seg, type, 0);
  127. write_idt_entry(idt_table, gate, &g);
  128. }
  129. static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
  130. {
  131. tss_desc tss;
  132. pack_descriptor(&tss, (unsigned long)addr,
  133. offsetof(struct tss_struct, __cacheline_filler) - 1,
  134. DESC_TSS, 0);
  135. write_gdt_entry(get_cpu_gdt_table(cpu), entry, &tss, DESC_TSS);
  136. }
  137. #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
  138. static inline unsigned long get_desc_base(unsigned long *desc)
  139. {
  140. unsigned long base;
  141. base = ((desc[0] >> 16) & 0x0000ffff) |
  142. ((desc[1] << 16) & 0x00ff0000) |
  143. (desc[1] & 0xff000000);
  144. return base;
  145. }
  146. #endif /* !__ASSEMBLY__ */
  147. #endif