tls.c

/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"
#include "mode.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* If needed we can detect when it's uninitialized. */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min = -1;

#ifdef CONFIG_MODE_SKAS
int do_set_thread_area_skas(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_set_thread_area(info, userspace_pid[cpu]);
        put_cpu();
        return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_get_thread_area(info, userspace_pid[cpu]);
        put_cpu();
        return ret;
}
#endif

/*
 * sys_get_thread_area: get a yet unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
        struct thread_struct *t = &task->thread;
        int idx;

        if (!t->arch.tls_array)
                return GDT_ENTRY_TLS_MIN;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

static inline void clear_user_desc(struct user_desc* info)
{
        /* Postcondition: LDT_empty(info) returns true. */
        memset(info, 0, sizeof(*info));

        /* Check LDT_empty() or the i386 sys_get_thread_area() code - this
         * indeed yields an empty user_desc.
         */
        info->read_exec_only = 1;
        info->seg_not_present = 1;
}
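
/* For reference (a sketch, not guaranteed to match every tree): on i386,
 * LDT_empty() in include/asm-i386/desc.h checks approximately
 *
 *      base_addr == 0 && limit == 0 && contents == 0 &&
 *      read_exec_only == 1 && seg_32bit == 0 && limit_in_pages == 0 &&
 *      seg_not_present == 1 && useable == 0
 *
 * which is why the two flag assignments above are needed on top of the
 * memset().
 */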

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
        int ret = 0;
        int idx;

        for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
                struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

                /* If a non-present entry hasn't been flushed yet, it gets
                 * cleared here and flushed to the host, which will clear the
                 * host entry too. */
                if (!curr->present) {
                        if (!curr->flushed) {
                                clear_user_desc(&curr->tls);
                                curr->tls.entry_number = idx;
                        } else {
                                WARN_ON(!LDT_empty(&curr->tls));
                                continue;
                        }
                }

                if (!(flags & O_FORCE) && curr->flushed)
                        continue;

                ret = do_set_thread_area(&curr->tls);
                if (ret)
                        goto out;

                curr->flushed = 1;
        }

out:
        return ret;
}
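
/* Summary of the per-entry state handled above (derived from the code, as a
 * reading aid rather than authoritative documentation):
 *
 *      present  flushed   action in load_TLS()
 *        1         1      re-sent to the host only with O_FORCE
 *        1         0      sent to the host, then marked flushed
 *        0         1      host entry already empty, skipped (WARN if not)
 *        0         0      cleared locally, then flushed so the host clears it
 */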

/* Check whether we need to do a flush for the new process, i.e. whether any
 * TLS entries have not yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
        int i;
        int ret = 0;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /* Can't test curr->present, we may need to clear a descriptor
                 * which had a value. */
                if (curr->flushed)
                        continue;
                ret = 1;
                break;
        }
        return ret;
}

/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
        int i;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /* Still correct to do this, if it wasn't present on the host
                 * it will remain as flushed as it was. */
                if (!curr->present)
                        continue;

                curr->flushed = 0;
        }
}

/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support), this wouldn't be needed.
 *
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch. */
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
        if (!host_supports_tls)
                return 0;

        /* We have no need whatsoever to switch TLS for kernel threads; beyond
         * that, that would also result in us calling os_set_thread_area with
         * userspace_pid[cpu] == 0, which gives an error. */
        if (likely(to->mm))
                return load_TLS(O_FORCE, to);

        return 0;
}

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
        if (!host_supports_tls)
                return 0;

        if (needs_TLS_update(to))
                return load_TLS(0, to);

        return 0;
}

static int set_tls_entry(struct task_struct* task, struct user_desc *info,
                         int idx, int flushed)
{
        struct thread_struct *t = &task->thread;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

        return 0;
}

int arch_copy_tls(struct task_struct *new)
{
        struct user_desc info;
        int idx, ret = -EFAULT;

        if (copy_from_user(&info,
                           (void __user *) UPT_ESI(&new->thread.regs.regs),
                           sizeof(info)))
                goto out;

        ret = -EINVAL;
        if (LDT_empty(&info))
                goto out;

        idx = info.entry_number;

        ret = set_tls_entry(new, &info, idx, 0);
out:
        return ret;
}

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
{
        struct thread_struct *t = &task->thread;

        if (!t->arch.tls_array)
                goto clear;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
                goto clear;

        *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
        /* Temporary debugging check, to make sure that things have been
         * flushed. This could be triggered if load_TLS() failed.
         */
        if (unlikely(task == current &&
                     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
                                "without flushed TLS.", current->pid);
        }

        return 0;
clear:
        /* When the TLS entry has not been set, the values returned to the
         * user from the tls_array are 0 (because it's cleared at boot, see
         * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
         */
        clear_user_desc(info);
        info->entry_number = idx;
        goto out;
}

asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        idx = info.entry_number;

        if (idx == -1) {
                idx = get_free_idx(current);
                if (idx < 0)
                        return idx;
                info.entry_number = idx;
                /* Tell the user which slot we chose for him. */
                if (put_user(idx, &user_desc->entry_number))
                        return -EFAULT;
        }

        ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info);
        if (ret)
                return ret;
        return set_tls_entry(current, &info, idx, 1);
}
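
/* Illustration only, not part of this file: a hypothetical userspace caller
 * usually lets the kernel pick the slot by passing entry_number == -1 and
 * then reads the chosen index back ("tls_block" below is an assumed buffer):
 *
 *      struct user_desc desc;
 *
 *      memset(&desc, 0, sizeof(desc));
 *      desc.entry_number = -1;                 // ask for any free slot
 *      desc.base_addr = (unsigned long) tls_block;
 *      desc.limit = 0xfffff;
 *      desc.seg_32bit = 1;
 *      desc.limit_in_pages = 1;
 *      desc.useable = 1;
 *      if (syscall(__NR_set_thread_area, &desc) == 0)
 *              printf("got TLS slot %d\n", desc.entry_number);
 */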

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;

        if (!host_supports_tls)
                return -EIO;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        return set_tls_entry(child, &info, idx, 0);
}

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (get_user(idx, &user_desc->entry_number))
                return -EFAULT;

        ret = get_tls_entry(current, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;

out:
        return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;
        int ret;

        if (!host_supports_tls)
                return -EIO;

        ret = get_tls_entry(child, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;
out:
        return ret;
}
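
/* Illustration only, not part of this file: a hypothetical tracer reaches the
 * two helpers above through the i386 PTRACE_GET_THREAD_AREA /
 * PTRACE_SET_THREAD_AREA requests, passing the GDT index (typically 6 for the
 * first TLS slot on i386) as the address argument and a struct user_desc
 * pointer as the data argument:
 *
 *      struct user_desc desc;
 *
 *      if (ptrace(PTRACE_GET_THREAD_AREA, child_pid, (void *) 6, &desc) == 0)
 *              printf("child TLS base: %#x\n", desc.base_addr);
 */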

/* XXX: This part is probably common to i386 and x86-64. Don't create a common
 * file for now, do that when implementing x86-64 support. */
static int __init __setup_host_supports_tls(void)
{
        check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
        if (host_supports_tls) {
                printk(KERN_INFO "Host TLS support detected\n");
                printk(KERN_INFO "Detected host type: ");
                switch (host_gdt_entry_tls_min) {
                case GDT_ENTRY_TLS_MIN_I386:
                        printk("i386\n");
                        break;
                case GDT_ENTRY_TLS_MIN_X86_64:
                        printk("x86_64\n");
                        break;
                }
        } else
                printk(KERN_ERR " Host TLS support NOT detected! "
                       "TLS support inside UML will not work\n");
        return 0;
}

__initcall(__setup_host_supports_tls);