/*
 * tls.c — x86 TLS (thread-local storage) descriptor management:
 * the set_thread_area / get_thread_area system calls.
 */
  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/sched.h>
  4. #include <linux/user.h>
  5. #include <asm/uaccess.h>
  6. #include <asm/desc.h>
  7. #include <asm/system.h>
  8. #include <asm/ldt.h>
  9. #include <asm/processor.h>
  10. #include <asm/proto.h>
  11. /*
  12. * sys_alloc_thread_area: get a yet unused TLS descriptor index.
  13. */
  14. static int get_free_idx(void)
  15. {
  16. struct thread_struct *t = &current->thread;
  17. int idx;
  18. for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
  19. if (desc_empty(&t->tls_array[idx]))
  20. return idx + GDT_ENTRY_TLS_MIN;
  21. return -ESRCH;
  22. }
/*
 * Set a given TLS descriptor:
 *
 * Copies a user_desc from @u_info and installs it into @p's TLS array.
 * @idx selects the GDT TLS entry; -1 means "take it from
 * info.entry_number", and if that too is -1 and @can_allocate is set,
 * a free slot is chosen and written back to userspace.
 *
 * Returns 0 on success, -EFAULT on a faulting user pointer, -EINVAL
 * for an out-of-range index, or -ESRCH when no free slot exists.
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct thread_struct *t = &p->thread;
	struct user_desc info;
	u32 *desc;
	int cpu;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		/* Report the chosen slot back to userspace. */
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	/* View the 8-byte descriptor for this slot as two 32-bit words. */
	desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		/* An "empty" user_desc clears the slot entirely. */
		desc[0] = 0;
		desc[1] = 0;
	} else {
		desc[0] = LDT_entry_a(&info);
		desc[1] = LDT_entry_b(&info);
	}

	/* Reload the hardware TLS entries only for the current thread;
	 * other tasks pick the new values up on their next switch. */
	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
	return 0;
}
  68. asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
  69. {
  70. return do_set_thread_area(current, -1, u_info, 1);
  71. }
/*
 * Get the current Thread-Local Storage area:
 */

/*
 * Field extractors for a GDT descriptor viewed as two 32-bit words
 * (desc[0] = low word, desc[1] = high word).  Bit positions follow
 * the x86 segment-descriptor layout.
 */
#define GET_LIMIT(desc) (((desc)[0] & 0x0ffff) | ((desc)[1] & 0xf0000))	/* limit 15:0 + 19:16 */
#define GET_32BIT(desc) (((desc)[1] >> 22) & 1)		/* D/B bit */
#define GET_CONTENTS(desc) (((desc)[1] >> 10) & 3)	/* type bits 10-11 */
#define GET_WRITABLE(desc) (((desc)[1] >> 9) & 1)	/* writable bit */
#define GET_LIMIT_PAGES(desc) (((desc)[1] >> 23) & 1)	/* granularity (G) */
#define GET_PRESENT(desc) (((desc)[1] >> 15) & 1)	/* present (P) */
#define GET_USEABLE(desc) (((desc)[1] >> 20) & 1)	/* AVL bit */
#define GET_LONGMODE(desc) (((desc)[1] >> 21) & 1)	/* long-mode (L) bit */
  83. int do_get_thread_area(struct task_struct *p, int idx,
  84. struct user_desc __user *u_info)
  85. {
  86. struct thread_struct *t = &p->thread;
  87. struct user_desc info;
  88. u32 *desc;
  89. if (idx == -1 && get_user(idx, &u_info->entry_number))
  90. return -EFAULT;
  91. if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
  92. return -EINVAL;
  93. desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
  94. memset(&info, 0, sizeof(struct user_desc));
  95. info.entry_number = idx;
  96. info.base_addr = get_desc_base((void *)desc);
  97. info.limit = GET_LIMIT(desc);
  98. info.seg_32bit = GET_32BIT(desc);
  99. info.contents = GET_CONTENTS(desc);
  100. info.read_exec_only = !GET_WRITABLE(desc);
  101. info.limit_in_pages = GET_LIMIT_PAGES(desc);
  102. info.seg_not_present = !GET_PRESENT(desc);
  103. info.useable = GET_USEABLE(desc);
  104. #ifdef CONFIG_X86_64
  105. info.lm = GET_LONGMODE(desc);
  106. #endif
  107. if (copy_to_user(u_info, &info, sizeof(info)))
  108. return -EFAULT;
  109. return 0;
  110. }
  111. asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
  112. {
  113. return do_get_thread_area(current, -1, u_info);
  114. }