/*
 * desc.h — i386 segment/interrupt descriptor table definitions.
 */
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H
#include <asm/ldt.h>
#include <asm/segment.h>
/* Size of the per-CPU stack below; presumably used for 16-bit/vm86 code paths — confirm at call sites. */
#define CPU_16BIT_STACK_SIZE 1024
#ifndef __ASSEMBLY__
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/mmu.h>
/* Boot GDT; each CPU's live GDT is found through cpu_gdt_descr (see get_cpu_gdt_table below). */
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
/*
 * Pseudo-descriptor in the format the LGDT/LIDT/SGDT/SIDT instructions
 * expect: a 16-bit limit followed immediately by a 32-bit linear base.
 * The packed attributes keep 'address' at offset 2, with no padding.
 */
struct Xgt_desc_struct {
	unsigned short size;
	unsigned long address __attribute__((packed));
	unsigned short pad;
} __attribute__ ((packed));

extern struct Xgt_desc_struct idt_descr;
DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
  20. static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
  21. {
  22. return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
  23. }
/* Load the task register / LDTR from their fixed GDT slots. */
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
/* Load the descriptor-table/selector registers from caller-supplied values. */
#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
/* Store the current table registers / selectors into caller-supplied storage. */
#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt[];
/* Install an interrupt gate for vector @irq pointing at handler @addr (defined elsewhere). */
extern void set_intr_gate(unsigned int irq, void * addr);
/*
 * Write an 8-byte system (TSS/LDT) descriptor at *(n):
 *   %1 = base address, %3 = limit, %4 = access/type byte.
 * The first rorl exposes the base's upper half for the byte stores;
 * the second restores the register.  Byte 6 (limit 19:16 + G/D flags)
 * is forced to 0, so limits here are byte-granular and < 2^16.
 */
#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %w3,0(%2)\n\t" /* limit 15:0 */ \
	"movw %w1,2(%2)\n\t" /* base 15:0 */ \
	"rorl $16,%1\n\t" \
	"movb %b1,4(%2)\n\t" /* base 23:16 */ \
	"movb %4,5(%2)\n\t" /* access/type byte */ \
	"movb $0,6(%2)\n\t" /* limit 19:16 and flags = 0 */ \
	"movb %h1,7(%2)\n\t" /* base 31:24 */ \
	"rorl $16,%1" \
	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
/*
 * Install @cpu's TSS descriptor into GDT slot @entry.  The limit covers
 * tss_struct up to (but not including) __cacheline_filler; type 0x89 is
 * a present, DPL-0, 32-bit available TSS.
 */
static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
{
	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
}
#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
/*
 * Install an LDT descriptor for @cpu.  @size is the number of 8-byte
 * entries, hence the byte limit (size << 3) - 1; type 0x82 is a
 * present, DPL-0 LDT descriptor.
 */
static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
{
	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
}
/*
 * Build the low dword of an LDT segment descriptor from a modify_ldt
 * user descriptor (see asm/ldt.h): base 15:0 in the top half,
 * limit 15:0 in the bottom half.
 */
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/*
 * High dword: base 31:24 and 23:16, limit 19:16, plus the access and
 * flag bits.  0x7000 sets S=1 (code/data) and DPL=3; note that
 * read_exec_only and seg_not_present are stored inverted.
 */
#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	(((info)->base_addr & 0x00ff0000) >> 16) | \
	((info)->limit & 0xf0000) | \
	(((info)->read_exec_only ^ 1) << 9) | \
	((info)->contents << 10) | \
	(((info)->seg_not_present ^ 1) << 15) | \
	((info)->seg_32bit << 22) | \
	((info)->limit_in_pages << 23) | \
	((info)->useable << 20) | \
	0x7000)
/*
 * True if @info describes the "empty" descriptor that userspace passes
 * to clear an LDT slot: zero base/limit/flags, read_exec_only and
 * seg_not_present set (matching the inverted encoding in LDT_entry_b).
 */
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )
  82. static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
  83. {
  84. __u32 *lp = (__u32 *)((char *)ldt + entry*8);
  85. *lp = entry_a;
  86. *(lp+1) = entry_b;
  87. }
/* load_TLS() below copies exactly three descriptors (3 * 8 bytes). */
#if TLS_SIZE != 24
# error update this code.
#endif
/* Copy the task's three TLS descriptors into @cpu's GDT TLS slots. */
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
	C(0); C(1); C(2);
#undef C
}
/*
 * Point the current CPU's LDT at the 5-entry default_ldt.
 * get_cpu()/put_cpu() keep us on one CPU while its GDT slot and
 * LDTR are updated.
 */
static inline void clear_LDT(void)
{
	int cpu = get_cpu();
	set_ldt_desc(cpu, &default_ldt[0], 5);
	load_LDT_desc();
	put_cpu();
}
/*
 * load one particular LDT into the current CPU
 *
 * "nolock": the caller must prevent migration off @cpu (cf. load_LDT
 * below, which brackets this with get_cpu()/put_cpu()).  An mm with no
 * LDT entries falls back to the 5-entry default_ldt.
 */
static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
{
	void *segments = pc->ldt;
	int count = pc->size;
	if (likely(!count)) {
		segments = &default_ldt[0];
		count = 5;
	}
	set_ldt_desc(cpu, segments, count);
	load_LDT_desc();
}
/* Preemption-safe wrapper: pin to the current CPU, then load @pc's LDT. */
static inline void load_LDT(mm_context_t *pc)
{
	int cpu = get_cpu();
	load_LDT_nolock(pc, cpu);
	put_cpu();
}
  124. static inline unsigned long get_desc_base(unsigned long *desc)
  125. {
  126. unsigned long base;
  127. base = ((desc[0] >> 16) & 0x0000ffff) |
  128. ((desc[1] << 16) & 0x00ff0000) |
  129. (desc[1] & 0xff000000);
  130. return base;
  131. }
  132. #endif /* !__ASSEMBLY__ */
  133. #endif