/* Written 2000 by Andi Kleen */
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

#include <linux/threads.h>
#include <asm/ldt.h>

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/smp.h>

#include <asm/desc_defs.h>
#include <asm/segment.h>
#include <asm/mmu.h>
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
  13. #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
  14. #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
  15. #define clear_LDT() asm volatile("lldt %w0"::"r" (0))
  16. static inline unsigned long __store_tr(void)
  17. {
  18. unsigned long tr;
  19. asm volatile ("str %w0":"=r" (tr));
  20. return tr;
  21. }
  22. #define store_tr(tr) (tr) = __store_tr()
extern gate_desc idt_table[];
extern struct desc_ptr cpu_gdt_descr[];
  25. static inline void write_ldt_entry(struct desc_struct *ldt,
  26. int entry, void *ptr)
  27. {
  28. memcpy(&ldt[entry], ptr, 8);
  29. }
  30. /* the cpu gdt accessor */
  31. #define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address)
  32. static inline void load_gdt(const struct desc_ptr *ptr)
  33. {
  34. asm volatile("lgdt %w0"::"m" (*ptr));
  35. }
  36. static inline void store_gdt(struct desc_ptr *ptr)
  37. {
  38. asm("sgdt %w0":"=m" (*ptr));
  39. }
  40. static inline void _set_gate(void *adr, unsigned type, unsigned long func,
  41. unsigned dpl, unsigned ist)
  42. {
  43. gate_desc s;
  44. s.offset_low = PTR_LOW(func);
  45. s.segment = __KERNEL_CS;
  46. s.ist = ist;
  47. s.p = 1;
  48. s.dpl = dpl;
  49. s.zero0 = 0;
  50. s.zero1 = 0;
  51. s.type = type;
  52. s.offset_middle = PTR_MIDDLE(func);
  53. s.offset_high = PTR_HIGH(func);
  54. /*
  55. * does not need to be atomic because it is only done once at
  56. * setup time
  57. */
  58. memcpy(adr, &s, 16);
  59. }
  60. static inline void set_intr_gate(int nr, void *func)
  61. {
  62. BUG_ON((unsigned)nr > 0xFF);
  63. _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
  64. }
  65. static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
  66. {
  67. BUG_ON((unsigned)nr > 0xFF);
  68. _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
  69. }
  70. static inline void set_system_gate(int nr, void *func)
  71. {
  72. BUG_ON((unsigned)nr > 0xFF);
  73. _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
  74. }
  75. static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
  76. {
  77. _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
  78. }
  79. static inline void load_idt(const struct desc_ptr *ptr)
  80. {
  81. asm volatile("lidt %w0"::"m" (*ptr));
  82. }
  83. static inline void store_idt(struct desc_ptr *dtr)
  84. {
  85. asm("sidt %w0":"=m" (*dtr));
  86. }
  87. static inline void set_tssldt_descriptor(void *ptr, unsigned long tss,
  88. unsigned type, unsigned size)
  89. {
  90. struct ldttss_desc64 d;
  91. memset(&d, 0, sizeof(d));
  92. d.limit0 = size & 0xFFFF;
  93. d.base0 = PTR_LOW(tss);
  94. d.base1 = PTR_MIDDLE(tss) & 0xFF;
  95. d.type = type;
  96. d.p = 1;
  97. d.limit1 = (size >> 16) & 0xF;
  98. d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
  99. d.base3 = PTR_HIGH(tss);
  100. memcpy(ptr, &d, 16);
  101. }
  102. static inline void set_tss_desc(unsigned cpu, void *addr)
  103. {
  104. /*
  105. * sizeof(unsigned long) coming from an extra "long" at the end
  106. * of the iobitmap. See tss_struct definition in processor.h
  107. *
  108. * -1? seg base+limit should be pointing to the address of the
  109. * last valid byte
  110. */
  111. set_tssldt_descriptor(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS],
  112. (unsigned long)addr, DESC_TSS,
  113. IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
  114. }
  115. static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
  116. {
  117. set_tssldt_descriptor(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
  118. (unsigned long)addr, DESC_LDT, size * 8 - 1);
  119. }
  120. #define LDT_empty(info) (\
  121. (info)->base_addr == 0 && \
  122. (info)->limit == 0 && \
  123. (info)->contents == 0 && \
  124. (info)->read_exec_only == 1 && \
  125. (info)->seg_32bit == 0 && \
  126. (info)->limit_in_pages == 0 && \
  127. (info)->seg_not_present == 1 && \
  128. (info)->useable == 0 && \
  129. (info)->lm == 0)
  130. static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
  131. {
  132. unsigned int i;
  133. u64 *gdt = (u64 *)(get_cpu_gdt_table(cpu) + GDT_ENTRY_TLS_MIN);
  134. for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
  135. gdt[i] = t->tls_array[i];
  136. }
  137. /*
  138. * load one particular LDT into the current CPU
  139. */
  140. static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
  141. {
  142. int count = pc->size;
  143. if (likely(!count)) {
  144. clear_LDT();
  145. return;
  146. }
  147. set_ldt_desc(cpu, pc->ldt, count);
  148. load_LDT_desc();
  149. }
  150. static inline void load_LDT(mm_context_t *pc)
  151. {
  152. int cpu = get_cpu();
  153. load_LDT_nolock(pc, cpu);
  154. put_cpu();
  155. }
extern struct desc_ptr idt_descr;
  157. static inline unsigned long get_desc_base(const void *ptr)
  158. {
  159. const u32 *desc = ptr;
  160. unsigned long base;
  161. base = ((desc[0] >> 16) & 0x0000ffff) |
  162. ((desc[1] << 16) & 0x00ff0000) |
  163. (desc[1] & 0xff000000);
  164. return base;
  165. }
#endif /* !__ASSEMBLY__ */
#endif /* __ARCH_DESC_H */