desc_64.h

/* Written 2000 by Andi Kleen */
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

#include <linux/threads.h>
#include <asm/ldt.h>

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/smp.h>

#include <asm/desc_defs.h>	/* gate_desc, struct desc_ptr, ldttss_desc64, PTR_LOW/MIDDLE/HIGH */
#include <asm/segment.h>

extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
extern gate_desc idt_table[];	/* the IDT itself; filled in via the set_*_gate() helpers below */

#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))

static inline unsigned long __store_tr(void)
{
	unsigned long tr;

	asm volatile ("str %w0":"=r" (tr));
	return tr;
}

#define store_tr(tr) (tr) = __store_tr()

extern struct desc_ptr cpu_gdt_descr[];

/* An LDT entry is 8 bytes; copy the new contents into place. */
static inline void write_ldt_entry(struct desc_struct *ldt,
				   int entry, void *ptr)
{
	memcpy(&ldt[entry], ptr, 8);
}

/* the cpu gdt accessor */
#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[(x)].address)

static inline void load_gdt(const struct desc_ptr *ptr)
{
	asm volatile("lgdt %w0"::"m" (*ptr));
}

static inline void store_gdt(struct desc_ptr *ptr)
{
	asm("sgdt %w0":"=m" (*ptr));
}

/* Build a 16-byte long-mode IDT gate descriptor and write it into *adr. */
static inline void _set_gate(void *adr, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist)
{
	gate_desc s;

	s.offset_low = PTR_LOW(func);
	s.segment = __KERNEL_CS;
	s.ist = ist;
	s.p = 1;
	s.dpl = dpl;
	s.zero0 = 0;
	s.zero1 = 0;
	s.type = type;
	s.offset_middle = PTR_MIDDLE(func);
	s.offset_high = PTR_HIGH(func);
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	memcpy(adr, &s, 16);
}
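
/*
 * For reference, PTR_LOW/PTR_MIDDLE/PTR_HIGH simply slice the 64-bit handler
 * address into the gate's 16/16/32-bit offset fields. A minimal stand-alone
 * sketch of the same split (function name is illustrative, not kernel API):
 *
 *	#include <stdint.h>
 *
 *	static void split_gate_offset(uint64_t func, uint16_t *low,
 *				      uint16_t *mid, uint32_t *high)
 *	{
 *		*low  = func & 0xFFFF;		// bits  0..15 -> offset_low
 *		*mid  = (func >> 16) & 0xFFFF;	// bits 16..31 -> offset_middle
 *		*high = func >> 32;		// bits 32..63 -> offset_high
 *	}
 */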

static inline void set_intr_gate(int nr, void *func)
{
	BUG_ON((unsigned)nr > 0xFF);
	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
}

static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
{
	BUG_ON((unsigned)nr > 0xFF);
	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
}

static inline void set_system_gate(int nr, void *func)
{
	BUG_ON((unsigned)nr > 0xFF);
	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}

static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
{
	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
}
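
/*
 * These wrappers are what trap/IRQ setup code calls. A sketch of typical use
 * (the handler symbols and the IST index name below are illustrative
 * placeholders, not definitions made by this header):
 *
 *	set_intr_gate(14, &my_page_fault_entry);	  // DPL 0, no IST stack
 *	set_intr_gate_ist(2, &my_nmi_entry, NMI_STACK);	  // run on a dedicated IST stack
 *	set_system_gate(3, &my_int3_entry);		  // DPL 3: reachable from user space
 */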

static inline void load_idt(const struct desc_ptr *ptr)
{
	asm volatile("lidt %w0"::"m" (*ptr));
}

static inline void store_idt(struct desc_ptr *dtr)
{
	asm("sidt %w0":"=m" (*dtr));
}

/*
 * Build a 16-byte TSS or LDT descriptor. In long mode these system
 * descriptors are expanded to 16 bytes and so occupy two GDT slots.
 */
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss,
					 unsigned type, unsigned size)
{
	struct ldttss_desc64 d;

	memset(&d, 0, sizeof(d));
	d.limit0 = size & 0xFFFF;
	d.base0 = PTR_LOW(tss);
	d.base1 = PTR_MIDDLE(tss) & 0xFF;
	d.type = type;
	d.p = 1;
	d.limit1 = (size >> 16) & 0xF;
	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
	d.base3 = PTR_HIGH(tss);
	memcpy(ptr, &d, 16);
}

static inline void set_tss_desc(unsigned cpu, void *addr)
{
	/*
	 * sizeof(unsigned long) coming from an extra "long" at the end
	 * of the iobitmap. See tss_struct definition in processor.h
	 *
	 * -1? seg base+limit should be pointing to the address of the
	 * last valid byte
	 */
	set_tssldt_descriptor(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS],
			      (unsigned long)addr, DESC_TSS,
			      IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
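
/*
 * The limit argument above works out roughly as follows (a sketch; the
 * concrete value of IO_BITMAP_OFFSET depends on the tss_struct layout in
 * processor.h):
 *
 *	limit = IO_BITMAP_OFFSET	// start of the I/O bitmap inside the TSS
 *	      + IO_BITMAP_BYTES		// 65536 ports / 8 = 8192 bytes of bitmap
 *	      + sizeof(unsigned long)	// the extra terminating "long"
 *	      - 1;			// limit addresses the last valid byte
 */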

static inline void set_ldt(void *addr, int entries)
{
	if (likely(entries == 0))
		__asm__ __volatile__("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();

		set_tssldt_descriptor(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
				      (unsigned long)addr, DESC_LDT, entries * 8 - 1);
		__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}
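
/*
 * Callers pass the kernel virtual address of the LDT and its number of
 * 8-byte entries (the limit becomes entries * 8 - 1). A sketch of a typical
 * caller in the LDT/context code (field names illustrative):
 *
 *	set_ldt(mm->context.ldt, mm->context.size);	// install this mm's LDT
 *	set_ldt(NULL, 0);				// or clear: lldt with a null selector
 */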

static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
	unsigned int i;
	u64 *gdt = (u64 *)(get_cpu_gdt_table(cpu) + GDT_ENTRY_TLS_MIN);

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[i] = t->tls_array[i];
}
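
/*
 * The context-switch path refreshes this CPU's TLS GDT slots for the
 * incoming task, roughly (a sketch of the caller, not part of this header):
 *
 *	cpu = smp_processor_id();
 *	load_TLS(&next_p->thread, cpu);
 */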

/* Extract the 32-bit base address scattered across a legacy 8-byte descriptor. */
static inline unsigned long get_desc_base(const void *ptr)
{
	const u32 *desc = ptr;
	unsigned long base;

	base = ((desc[0] >> 16) & 0x0000ffff) |
	       ((desc[1] << 16) & 0x00ff0000) |
	       (desc[1] & 0xff000000);
	return base;
}
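
/*
 * A worked example of the decode above (values are illustrative): for a
 * descriptor whose two 32-bit words are desc[0] = 0x12340000 and
 * desc[1] = 0x56000078:
 *
 *	base  0..15 = desc[0] >> 16	= 0x1234
 *	base 16..23 = desc[1] & 0xff	= 0x78
 *	base 24..31 = desc[1] >> 24	= 0x56
 *	base			   	= 0x56781234
 */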

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */