tls_32.c

/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}
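
/*
 * For reference, the os_*_thread_area() helpers above live on the host side
 * of UML and, under SKAS, boil down to ptrace calls against the host process
 * that runs our userspace. A minimal host-side sketch (a hypothetical
 * simplification, not the verbatim arch/x86/um/os-Linux/tls.c code):
 *
 *	int os_set_thread_area(struct user_desc *info, int pid)
 *	{
 *		int ret;
 *
 *		ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
 *			     (unsigned long) info);
 *		if (ret < 0)
 *			ret = -errno;
 *		return ret;
 *	}
 *
 * os_get_thread_area() has the same shape with PTRACE_GET_THREAD_AREA.
 */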

/*
 * get_free_idx: get a yet-unused TLS descriptor index, for use by
 * sys_set_thread_area() when the caller passes entry_number == -1.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}
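
/*
 * Worked example of the index arithmetic used throughout this file, taking
 * the classic i386 values (GDT_ENTRY_TLS_MIN == 6, GDT_ENTRY_TLS_ENTRIES
 * == 3): tls_array[0..2] shadows GDT descriptor indexes 6..8, so array slot
 * i maps to descriptor index i + GDT_ENTRY_TLS_MIN, and a descriptor index
 * idx maps back to slot idx - GDT_ENTRY_TLS_MIN.
 */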

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Check LDT_empty() or the i386 sys_get_thread_area() code - this
	 * indeed yields an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
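
/*
 * For context, the LDT_empty() predicate referenced above (as in the classic
 * i386 headers; quoted from memory, so check asm/desc.h for the authoritative
 * version) treats a descriptor as empty when every field is zero except the
 * two bits set by clear_user_desc():
 *
 *	#define LDT_empty(info) (\
 *		(info)->base_addr	== 0	&& \
 *		(info)->limit		== 0	&& \
 *		(info)->contents	== 0	&& \
 *		(info)->read_exec_only	== 1	&& \
 *		(info)->seg_32bit	== 0	&& \
 *		(info)->limit_in_pages	== 0	&& \
 *		(info)->seg_not_present	== 1	&& \
 *		(info)->useable		== 0	)
 */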

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If a non-present entry hasn't been flushed yet, it gets
		 * cleared here and flushed to the host, which will clear it
		 * there too.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}

out:
	return ret;
}

/*
 * Check whether we need to do a flush for the new process, i.e. whether any
 * descriptors haven't yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present, we may need to clear a descriptor
		 * which had a value.
		 */
		if (curr->flushed)
			continue;

		ret = 1;
		break;
	}

	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Still correct to do this; if it wasn't present on the host
		 * it will remain as flushed as it was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}
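
/*
 * Putting the pieces above together, the intended life cycle around fork is
 * roughly the following (a sketch of the call order, not verbatim kernel
 * code; copy_thread()/switch_to() are the usual arch hooks):
 *
 *	copy_thread()
 *		-> arch_copy_tls(new)		// CLONE_SETTLS path, below
 *		-> clear_flushed_tls(new)	// mark everything unflushed
 *	...
 *	switch_to(prev, new)
 *		-> arch_switch_tls(new)
 *			-> load_TLS(O_FORCE, new)	// first flush to host
 */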

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this wouldn't be needed.
 *
 * And it won't be needed either when (and if) we add support for the host SKAS
 * patch.
 */
int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, doing so would also result in us calling os_set_thread_area
	 * with userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_SI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
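
/*
 * arch_copy_tls() services CLONE_SETTLS: on 32-bit x86 the tls argument of
 * the clone syscall travels in %esi, which is why the struct user_desc
 * pointer is fished out of the child's registers with UPT_SI(). A hedged
 * user-space sketch of the calling side (glibc clone() wrapper argument
 * order quoted from memory; stack setup and error handling omitted):
 *
 *	struct user_desc tls = {
 *		.entry_number = 6,	// a GDT TLS slot, i386 numbering
 *		.base_addr = (unsigned long) tls_block,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *
 *	clone(thread_fn, child_stack_top,
 *	      CLONE_VM | CLONE_SETTLS | SIGCHLD, arg,
 *	      NULL, &tls, NULL);
 */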

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
		       "without flushed TLS.\n", current->pid);
	}

	return 0;

clear:
	/*
	 * When the TLS entry has not been set, the values returned to user
	 * space from the tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
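
/*
 * Usage sketch for the slot-allocation path above (a hypothetical user-space
 * caller invoking the raw syscall):
 *
 *	struct user_desc info = {
 *		.entry_number = -1,	// ask the kernel to pick a free slot
 *		.base_addr = (unsigned long) tls_block,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *
 *	if (syscall(__NR_set_thread_area, &info) == 0)
 *		// info.entry_number now holds the slot the kernel chose;
 *		// the matching %gs selector is entry_number * 8 + 3.
 */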

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}
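
/*
 * A debugger running inside UML reaches the two helpers above through the
 * usual x86 ptrace requests. Tracer-side sketch (the child is assumed to be
 * ptrace-stopped; error handling omitted):
 *
 *	struct user_desc info;
 *
 *	ptrace(PTRACE_GET_THREAD_AREA, child_pid, idx, &info);
 *	info.base_addr = new_base;
 *	ptrace(PTRACE_SET_THREAD_AREA, child_pid, idx, &info);
 */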

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1);
	} else
		printk(KERN_ERR "Host TLS support NOT detected! "
		       "TLS support inside UML will not work\n");

	return 0;
}

__initcall(__setup_host_supports_tls);
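
/*
 * For illustration, on an i386 host with the usual constants
 * (GDT_ENTRY_TLS_MIN_I386 == 6, GDT_ENTRY_TLS_ENTRIES == 3) the boot log
 * from the initcall above would read along these lines:
 *
 *	Host TLS support detected
 *	Detected host type: i386 (GDT indexes 6 to 8)
 */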