ia32_support.c

/*
 * IA32 helper functions
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2001-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
 * 02/19/01	D. Mosberger	dropped tssd; it's not needed
 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
 * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>

#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#include "ia32priv.h"

extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

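/*
 * ia32_shared_page[] holds each CPU's IA-32 GDT page; ia32_boot_gdt
 * points into the boot CPU's page and serves as the template that
 * ia32_gdt_init() copies for the other CPUs.  ia32_gate_page holds
 * the sigreturn trampolines built in ia32_gate_page_init().
 */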
struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
struct page *ia32_gate_page;

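/*
 * Look up the descriptor an IA-32 selector refers to and return it in
 * unscrambled (ia64-friendly) form.  The IA32_SEGSEL_TI bit selects
 * the LDT over the GDT; the high bits of the selector index into the
 * chosen table.  A null selector or out-of-range index yields 0.
 */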
static unsigned long
load_desc (u16 selector)
{
	unsigned long *table, limit, index;

	if (!selector)
		return 0;
	if (selector & IA32_SEGSEL_TI) {
		table = (unsigned long *) IA32_LDT_OFFSET;
		limit = IA32_LDT_ENTRIES;
	} else {
		table = cpu_gdt_table[smp_processor_id()];
		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
	}
	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
	if (index >= limit)
		return 0;
	return IA32_SEG_UNSCRAMBLE(table[index]);
}

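/*
 * Reload the cached descriptor registers from the segment selectors
 * that an ia32 task keeps packed in r16 (DS/ES/FS/GS) and r17 (CS/SS).
 */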
void
ia32_load_segment_descriptors (struct task_struct *task)
{
	struct pt_regs *regs = ia64_task_regs(task);

	/* Setup the segment descriptors */
	regs->r24 = load_desc(regs->r16 >> 16);		/* ESD */
	regs->r27 = load_desc(regs->r16 >>  0);		/* DSD */
	regs->r28 = load_desc(regs->r16 >> 32);		/* FSD */
	regs->r29 = load_desc(regs->r16 >> 48);		/* GSD */
	regs->ar_csd = load_desc(regs->r17 >>  0);	/* CSD */
	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
}

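/*
 * Install the TLS descriptor a clone() caller passed for the child:
 * the user-space ia32_user_desc pointer arrives in the low 32 bits of
 * the child's r14.  The descriptor goes into the child's TLS array
 * and the child's segment descriptors are refreshed to match.
 */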
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
	struct desc_struct *desc;
	struct ia32_user_desc info;
	int idx;

	if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
		return -EFAULT;
	if (LDT_empty(&info))
		return -EINVAL;

	idx = info.entry_number;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
	desc->a = LDT_entry_a(&info);
	desc->b = LDT_entry_b(&info);

	/* XXX: can this be done in a cleaner way? */
	load_TLS(&child->thread, smp_processor_id());
	ia32_load_segment_descriptors(child);
	load_TLS(&current->thread, smp_processor_id());

	return 0;
}

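/*
 * Save the ia32 flag and floating-point control/status registers into
 * the thread structure and restore the ia64 values of the kernel
 * registers that ia32 execution had borrowed (I/O base and TSSD).
 */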
void
ia32_save_state (struct task_struct *t)
{
	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
	t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
	t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
	t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
	t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}

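/*
 * The counterpart of ia32_save_state(): load the saved ia32 register
 * state, point the kernel registers at the IA-32 I/O base and TSS
 * descriptor, and patch the TSS/LDT selectors into r17.
 */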
void
ia32_load_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
	struct pt_regs *regs = ia64_task_regs(t);

	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
	fir = t->thread.fir;
	fdr = t->thread.fdr;
	tssd = load_desc(_TSS);					/* TSSD */

	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
	ia64_setreg(_IA64_REG_AR_FSR, fsr);
	ia64_setreg(_IA64_REG_AR_FCR, fcr);
	ia64_setreg(_IA64_REG_AR_FIR, fir);
	ia64_setreg(_IA64_REG_AR_FDR, fdr);
	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
	ia64_set_kr(IA64_KR_TSSD, tssd);

	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
	regs->r30 = load_desc(_LDT);				/* LDTD */
	load_TLS(&t->thread, smp_processor_id());
}

/*
 * Setup IA32 GDT and TSS
 */
void
ia32_gdt_init (void)
{
	int cpu = smp_processor_id();

	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[cpu])
		panic("failed to allocate ia32_shared_page[%d]\n", cpu);

	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);

	/* Copy from the boot cpu's GDT */
	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
}

/*
 * Setup the boot CPU's IA32 GDT: the user code and data descriptors
 * plus the TSS and LDT entries.
 */
static void
ia32_boot_gdt_init (void)
{
	unsigned long ldt_size;

	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[0])
		panic("failed to allocate ia32_shared_page[0]\n");

	ia32_boot_gdt = page_address(ia32_shared_page[0]);
	cpu_gdt_table[0] = ia32_boot_gdt;

	/* CS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_CS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0xb, 1, 3, 1, 1, 1, 1);

	/* DS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_DS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0x3, 1, 3, 1, 1, 1, 1);

	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
						       0xb, 0, 3, 1, 1, 1, 0);
	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
						       0x2, 0, 3, 1, 1, 1, 0);
}

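/*
 * Populate the gate page with the two signal-return trampolines.
 * Each stub loads a sigreturn syscall number into %eax and executes
 * int $0x80; the instruction bytes are assembled by hand below.
 */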
static void
ia32_gate_page_init (void)
{
	unsigned long *sr;

	ia32_gate_page = alloc_page(GFP_KERNEL);
	if (!ia32_gate_page)
		panic("failed to allocate ia32_gate_page\n");

	sr = page_address(ia32_gate_page);
	/* This is popl %eax ; movl $,%eax ; int $0x80 */
	*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);

	/* This is movl $,%eax ; int $0x80 */
	*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
}

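/*
 * One-time boot setup of the IA-32 support structures: the boot GDT
 * and the signal trampoline page.
 */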
void
ia32_mem_init (void)
{
	ia32_boot_gdt_init();
	ia32_gate_page_init();
}

/*
 * Handle bad IA32 interrupt via syscall
 */
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
	siginfo_t siginfo;

	die_if_kernel("Bad IA-32 interrupt", regs, int_num);

	siginfo.si_signo = SIGTRAP;
	siginfo.si_errno = int_num;	/* XXX is it OK to abuse si_errno like this? */
	siginfo.si_flags = 0;
	siginfo.si_isr = 0;
	siginfo.si_addr = NULL;
	siginfo.si_imm = 0;
	siginfo.si_code = TRAP_BRKPT;
	force_sig_info(SIGTRAP, &siginfo, current);
}

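/*
 * Per-CPU setup: load the IA-32 control register image into ar.cflg
 * (IA32_CR4 in the upper half, IA32_CR0 in the lower half).
 */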
void
ia32_cpu_init (void)
{
	/* initialize global ia32 state - CR0 and CR4 */
	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}

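/*
 * Register the Linux/x86 execution domain.  When the native page size
 * exceeds the IA-32 4KB page size, also create the slab cache used to
 * track partially mapped 4KB pages.
 */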
static int __init
ia32_init (void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	{
		extern kmem_cache_t *partial_page_cachep;

		partial_page_cachep = kmem_cache_create("partial_page_cache",
							sizeof(struct partial_page), 0, 0,
							NULL, NULL);
		if (!partial_page_cachep)
			panic("Cannot create partial page SLAB cache");
	}
#endif
	return 0;
}

__initcall(ia32_init);