ldt.c 5.7 KB

  1. /*
  2. * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
  3. * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  4. * Copyright (C) 2002 Andi Kleen
  5. *
  6. * This handles calls from both 32bit and 64bit mode.
  7. */
  8. #include <linux/errno.h>
  9. #include <linux/gfp.h>
  10. #include <linux/sched.h>
  11. #include <linux/string.h>
  12. #include <linux/mm.h>
  13. #include <linux/smp.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/uaccess.h>
  16. #include <asm/system.h>
  17. #include <asm/ldt.h>
  18. #include <asm/desc.h>
  19. #include <asm/mmu_context.h>
  20. #include <asm/syscalls.h>
  21. #ifdef CONFIG_SMP
  22. static void flush_ldt(void *current_mm)
  23. {
  24. if (current->active_mm == current_mm)
  25. load_LDT(&current->active_mm->context);
  26. }
  27. #endif
/*
 * Grow the LDT of @pc to hold at least @mincount entries, rounded up to
 * a whole page worth of entries.  If @reload is set, the running CPUs
 * are made to pick up the new table.  Caller holds the context lock.
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	/* Grow-only: if the current table already has room, nothing to do. */
	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	/* Round the request up to a multiple of entries-per-page. */
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	/* One page via the page allocator, larger via vmalloc. */
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	/* Copy the existing entries over and zero the newly added tail. */
	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);
	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
	/*
	 * Publish the new pointer before the new size: a concurrent
	 * reader must never see the larger size paired with the old,
	 * smaller table.
	 */
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		/* Any other CPU running this mm must reload its LDT too. */
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	if (oldsize) {
		/* Free the old table through the same size-based split. */
		paravirt_free_ldt(oldldt, oldsize);
		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			put_page(virt_to_page(oldldt));
	}
	return 0;
}
  78. static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  79. {
  80. int err = alloc_ldt(new, old->size, 0);
  81. int i;
  82. if (err < 0)
  83. return err;
  84. for (i = 0; i < old->size; i++)
  85. write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
  86. return 0;
  87. }
  88. /*
  89. * we do not have to muck with descriptors here, that is
  90. * done in switch_mm() as needed.
  91. */
  92. int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  93. {
  94. struct mm_struct *old_mm;
  95. int retval = 0;
  96. mutex_init(&mm->context.lock);
  97. mm->context.size = 0;
  98. old_mm = current->mm;
  99. if (old_mm && old_mm->context.size > 0) {
  100. mutex_lock(&old_mm->context.lock);
  101. retval = copy_ldt(&mm->context, &old_mm->context);
  102. mutex_unlock(&old_mm->context.lock);
  103. }
  104. return retval;
  105. }
  106. /*
  107. * No need to lock the MM as we are the last user
  108. *
  109. * 64bit: Don't touch the LDT register - we're already in the next thread.
  110. */
  111. void destroy_context(struct mm_struct *mm)
  112. {
  113. if (mm->context.size) {
  114. #ifdef CONFIG_X86_32
  115. /* CHECKME: Can this ever happen ? */
  116. if (mm == current->active_mm)
  117. clear_LDT();
  118. #endif
  119. paravirt_free_ldt(mm->context.ldt, mm->context.size);
  120. if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
  121. vfree(mm->context.ldt);
  122. else
  123. put_page(virt_to_page(mm->context.ldt));
  124. mm->context.size = 0;
  125. }
  126. }
  127. static int read_ldt(void __user *ptr, unsigned long bytecount)
  128. {
  129. int err;
  130. unsigned long size;
  131. struct mm_struct *mm = current->mm;
  132. if (!mm->context.size)
  133. return 0;
  134. if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
  135. bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
  136. mutex_lock(&mm->context.lock);
  137. size = mm->context.size * LDT_ENTRY_SIZE;
  138. if (size > bytecount)
  139. size = bytecount;
  140. err = 0;
  141. if (copy_to_user(ptr, mm->context.ldt, size))
  142. err = -EFAULT;
  143. mutex_unlock(&mm->context.lock);
  144. if (err < 0)
  145. goto error_return;
  146. if (size != bytecount) {
  147. /* zero-fill the rest */
  148. if (clear_user(ptr + size, bytecount - size) != 0) {
  149. err = -EFAULT;
  150. goto error_return;
  151. }
  152. }
  153. return bytecount;
  154. error_return:
  155. return err;
  156. }
  157. static int read_default_ldt(void __user *ptr, unsigned long bytecount)
  158. {
  159. /* CHECKME: Can we use _one_ random number ? */
  160. #ifdef CONFIG_X86_32
  161. unsigned long size = 5 * sizeof(struct desc_struct);
  162. #else
  163. unsigned long size = 128;
  164. #endif
  165. if (bytecount > size)
  166. bytecount = size;
  167. if (clear_user(ptr, bytecount))
  168. return -EFAULT;
  169. return bytecount;
  170. }
  171. static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
  172. {
  173. struct mm_struct *mm = current->mm;
  174. struct desc_struct ldt;
  175. int error;
  176. struct user_desc ldt_info;
  177. error = -EINVAL;
  178. if (bytecount != sizeof(ldt_info))
  179. goto out;
  180. error = -EFAULT;
  181. if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
  182. goto out;
  183. error = -EINVAL;
  184. if (ldt_info.entry_number >= LDT_ENTRIES)
  185. goto out;
  186. if (ldt_info.contents == 3) {
  187. if (oldmode)
  188. goto out;
  189. if (ldt_info.seg_not_present == 0)
  190. goto out;
  191. }
  192. mutex_lock(&mm->context.lock);
  193. if (ldt_info.entry_number >= mm->context.size) {
  194. error = alloc_ldt(&current->mm->context,
  195. ldt_info.entry_number + 1, 1);
  196. if (error < 0)
  197. goto out_unlock;
  198. }
  199. /* Allow LDTs to be cleared by the user. */
  200. if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
  201. if (oldmode || LDT_empty(&ldt_info)) {
  202. memset(&ldt, 0, sizeof(ldt));
  203. goto install;
  204. }
  205. }
  206. fill_ldt(&ldt, &ldt_info);
  207. if (oldmode)
  208. ldt.avl = 0;
  209. /* Install the new entry ... */
  210. install:
  211. write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
  212. error = 0;
  213. out_unlock:
  214. mutex_unlock(&mm->context.lock);
  215. out:
  216. return error;
  217. }
  218. asmlinkage int sys_modify_ldt(int func, void __user *ptr,
  219. unsigned long bytecount)
  220. {
  221. int ret = -ENOSYS;
  222. switch (func) {
  223. case 0:
  224. ret = read_ldt(ptr, bytecount);
  225. break;
  226. case 1:
  227. ret = write_ldt(ptr, bytecount, 1);
  228. break;
  229. case 2:
  230. ret = read_default_ldt(ptr, bytecount);
  231. break;
  232. case 0x11:
  233. ret = write_ldt(ptr, bytecount, 0);
  234. break;
  235. }
  236. return ret;
  237. }