interrupts_and_traps.c

#include <linux/uaccess.h>
#include "lg.h"

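/*
 * Helpers for picking apart an IDT entry, which arrives as two 32-bit
 * words: the handler address is split between the low half of the first
 * word and the high half of the second, the gate type sits in bits 8-11 of
 * the second word, and bit 15 of the second word is the "present" flag.
 */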
static unsigned long idt_address(u32 lo, u32 hi)
{
        return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

static int idt_type(u32 lo, u32 hi)
{
        return (hi >> 8) & 0xF;
}

static int idt_present(u32 lo, u32 hi)
{
        return (hi & 0x8000);
}

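/* Push a 32-bit value onto the guest's stack: move the (guest-physical)
 * stack pointer down by four bytes, then write the value there. */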
static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
{
        *gstack -= 4;
        lgwrite_u32(lg, *gstack, val);
}

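/*
 * set_guest_interrupt() does what the hardware would do when delivering a
 * trap or interrupt: push the old ss/esp (on a ring change), eflags, cs,
 * eip and optionally an error code onto the guest stack, then point the
 * saved registers at the guest's handler taken from the IDT entry (lo, hi).
 */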
static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
{
        unsigned long gstack;
        u32 eflags, ss, irq_enable;

        /* If they want a ring change, we use new stack and push old ss/esp */
        if ((lg->regs->ss & 0x3) != GUEST_PL) {
                gstack = guest_pa(lg, lg->esp1);
                ss = lg->ss1;
                push_guest_stack(lg, &gstack, lg->regs->ss);
                push_guest_stack(lg, &gstack, lg->regs->esp);
        } else {
                gstack = guest_pa(lg, lg->regs->esp);
                ss = lg->regs->ss;
        }

        /* We use IF bit in eflags to indicate whether irqs were disabled
           (it's always 0, since irqs are enabled when guest is running). */
        eflags = lg->regs->eflags;
        if (get_user(irq_enable, &lg->lguest_data->irq_enabled))
                irq_enable = 0;
        eflags |= (irq_enable & X86_EFLAGS_IF);

        push_guest_stack(lg, &gstack, eflags);
        push_guest_stack(lg, &gstack, lg->regs->cs);
        push_guest_stack(lg, &gstack, lg->regs->eip);

        if (has_err)
                push_guest_stack(lg, &gstack, lg->regs->errcode);

        /* Change the real stack so switcher returns to trap handler */
        lg->regs->ss = ss;
        lg->regs->esp = gstack + lg->page_offset;
        lg->regs->cs = (__KERNEL_CS|GUEST_PL);
        lg->regs->eip = idt_address(lo, hi);

        /* Disable interrupts for an interrupt gate. */
        if (idt_type(lo, hi) == 0xE)
                if (put_user(0, &lg->lguest_data->irq_enabled))
                        kill_guest(lg, "Disabling interrupts");
}

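/*
 * maybe_do_interrupt(): look for a pending, unblocked interrupt and, if the
 * guest is ready for it (not halted, not in its no-irq region, and with
 * interrupts enabled), deliver it through the guest's IDT.
 */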
void maybe_do_interrupt(struct lguest *lg)
{
        unsigned int irq;
        DECLARE_BITMAP(blk, LGUEST_IRQS);
        struct desc_struct *idt;

        if (!lg->lguest_data)
                return;

        /* Mask out any interrupts they have blocked. */
        if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
                           sizeof(blk)))
                return;

        bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);

        irq = find_first_bit(blk, LGUEST_IRQS);
        if (irq >= LGUEST_IRQS)
                return;

        if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
                return;

        /* If they're halted, we re-enable interrupts. */
        if (lg->halted) {
                /* Re-enable interrupts. */
                if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
                        kill_guest(lg, "Re-enabling interrupts");
                lg->halted = 0;
        } else {
                /* Maybe they have interrupts disabled? */
                u32 irq_enabled;
                if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
                        irq_enabled = 0;
                if (!irq_enabled)
                        return;
        }

        idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq];
        if (idt_present(idt->a, idt->b)) {
                clear_bit(irq, lg->irqs_pending);
                set_guest_interrupt(lg, idt->a, idt->b, 0);
        }
}

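/*
 * has_err() is true for the traps where the CPU also pushes an error code
 * (double fault, traps 10 through 14, and alignment check).
 * deliver_trap() reflects trap "num" into the guest if its IDT entry is
 * present, and tells the caller whether it did so.
 */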
static int has_err(unsigned int trap)
{
        return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}

int deliver_trap(struct lguest *lg, unsigned int num)
{
        u32 lo = lg->idt[num].a, hi = lg->idt[num].b;

        if (!idt_present(lo, hi))
                return 0;
        set_guest_interrupt(lg, lo, hi, has_err(num));
        return 1;
}

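/*
 * direct_trap() decides whether a trap can be delivered straight to the
 * guest by the switcher: hardware interrupts (other than the syscall
 * vector), the traps we emulate ourselves (page fault, general protection,
 * device not available and the hypercall trap), and anything that isn't a
 * present trap gate must come to the host first.
 */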
static int direct_trap(const struct lguest *lg,
                       const struct desc_struct *trap,
                       unsigned int num)
{
        /* Hardware interrupts don't go to guest (except syscall). */
        if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR)
                return 0;

        /* We intercept page fault (demand shadow paging & cr2 saving),
           protection fault (in/out emulation), device not available
           (TS handling), and hypercall. */
        if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY)
                return 0;

        /* Interrupt gates (0xE) or not present (0x0) can't go direct. */
        return idt_type(trap->a, trap->b) == 0xF;
}

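/*
 * The stack the guest told us about via guest_set_stack() has to stay
 * mapped, otherwise we couldn't push the trap frame when an interrupt
 * arrives; pin_stack_pages() pins each page of that stack, and
 * guest_set_stack() sanity-checks and records the segment, pointer and
 * page count before pinning.
 */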
void pin_stack_pages(struct lguest *lg)
{
        unsigned int i;

        for (i = 0; i < lg->stack_pages; i++)
                pin_page(lg, lg->esp1 - i * PAGE_SIZE);
}

void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
{
        /* You cannot have a stack segment with priv level 0. */
        if ((seg & 0x3) != GUEST_PL)
                kill_guest(lg, "bad stack segment %i", seg);
        if (pages > 2)
                kill_guest(lg, "bad stack pages %u", pages);
        lg->ss1 = seg;
        lg->esp1 = esp;
        lg->stack_pages = pages;
        pin_stack_pages(lg);
}

/* Set up trap in IDT. */
static void set_trap(struct lguest *lg, struct desc_struct *trap,
                     unsigned int num, u32 lo, u32 hi)
{
        u8 type = idt_type(lo, hi);

        if (!idt_present(lo, hi)) {
                trap->a = trap->b = 0;
                return;
        }

        if (type != 0xE && type != 0xF)
                kill_guest(lg, "bad IDT type %i", type);

        trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
        trap->b = (hi&0xFFFFEF00);
}

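/*
 * load_guest_idt_entry(): the guest is loading a new IDT entry.  We ignore
 * the vectors it never handles itself, remember the rest, and mark
 * CHANGED_IDT so the table the switcher uses is refreshed before the guest
 * runs again (presumably via copy_traps() below).
 */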
void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
{
        /* Guest never handles: NMI, doublefault, hypercall, spurious irq. */
        if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
                return;

        lg->changed |= CHANGED_IDT;
        if (num < ARRAY_SIZE(lg->idt))
                set_trap(lg, &lg->idt[num], num, lo, hi);
        else if (num == SYSCALL_VECTOR)
                set_trap(lg, &lg->syscall_idt, num, lo, hi);
}

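/*
 * The remaining IDT routines build the table the switcher actually loads:
 * default_idt_entry() creates an entry that bounces into the host's own
 * handler (only the hypercall vector is callable from the guest's privilege
 * level), setup_default_idt_entries() fills a whole default table, and
 * copy_traps() overlays the guest's direct-deliverable traps on top.
 */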
static void default_idt_entry(struct desc_struct *idt,
                              int trap,
                              const unsigned long handler)
{
        u32 flags = 0x8e00;

        /* They can't "int" into any of them except hypercall. */
        if (trap == LGUEST_TRAP_ENTRY)
                flags |= (GUEST_PL << 13);

        idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
        idt->b = (handler&0xFFFF0000) | flags;
}

void setup_default_idt_entries(struct lguest_ro_state *state,
                               const unsigned long *def)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
                default_idt_entry(&state->guest_idt[i], i, def[i]);
}

void copy_traps(const struct lguest *lg, struct desc_struct *idt,
                const unsigned long *def)
{
        unsigned int i;

        /* All hardware interrupts are the same whatever the guest: only the
         * traps might be different. */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) {
                if (direct_trap(lg, &lg->idt[i], i))
                        idt[i] = lg->idt[i];
                else
                        default_idt_entry(&idt[i], i, def[i]);
        }

        /* The syscall vector sits above FIRST_EXTERNAL_VECTOR, so handle it
         * separately. */
        i = SYSCALL_VECTOR;
        if (direct_trap(lg, &lg->syscall_idt, i))
                idt[i] = lg->syscall_idt;
        else
                default_idt_entry(&idt[i], i, def[i]);
}

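/*
 * Clock device: the guest programs a one-shot timer through
 * guest_set_clockevent(); when the hrtimer fires, clockdev_fn() marks
 * interrupt 0 pending and wakes the guest's task if it was halted, so a
 * later pass through maybe_do_interrupt() delivers the tick.
 */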
void guest_set_clockevent(struct lguest *lg, unsigned long delta)
{
        ktime_t expires;

        if (unlikely(delta == 0)) {
                /* Clock event device is shutting down. */
                hrtimer_cancel(&lg->hrt);
                return;
        }

        expires = ktime_add_ns(ktime_get_real(), delta);
        hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
{
        struct lguest *lg = container_of(timer, struct lguest, hrt);

        set_bit(0, lg->irqs_pending);
        if (lg->halted)
                wake_up_process(lg->tsk);
        return HRTIMER_NORESTART;
}

void init_clockdev(struct lguest *lg)
{
        hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        lg->hrt.function = clockdev_fn;
}