vdso.c

/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
        unsigned long val;
        int rc;

        rc = 0;
        if (strncmp(s, "on", 3) == 0)
                vdso_enabled = 1;
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
                rc = strict_strtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
}
__setup("vdso=", vdso_setup);

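/*
 * The "vdso=" handler above is registered as a kernel command-line
 * parameter, so for example booting with
 *
 *      vdso=0  (or "vdso=off") - never map the vDSO
 *      vdso=1  (or "vdso=on")  - map the vDSO for dynamically linked
 *                                binaries (the default)
 *
 * sets vdso_enabled before vdso_init() below runs.
 */
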
/*
 * The vdso data page
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
        vd->ectg_available =
                addressing_mode != HOME_SPACE_MODE && test_facility(31);
}

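/*
 * Note on the flag set above: test_facility(31) checks for the
 * extract-CPU-time facility (the ECTG instruction).  ectg_available
 * therefore records whether the vDSO may use ECTG; the vDSO code is
 * expected to test this flag before taking its ECTG-based fast path.
 * The flag stays off in home-space addressing mode, matching the
 * per-cpu data setup below.
 */
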
#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER   2

int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_RO + page_frame;

        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x20000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;
        return 0;

out:
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}

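/*
 * Informal sketch of what vdso_alloc_per_cpu() builds (a summary of the
 * code above, not a definitive description of the hardware format):
 *
 *      lowcore->paste[4] -> psal (upper half of the page-table page)
 *      psal[2]           -> aste
 *      aste[2..3]        -> segment table (4 pages)
 *      segment table[0]  -> page table (256 entries)
 *      page table[0]     -> page_frame, mapped read-only (_PAGE_RO)
 *
 * lowcore->vdso_per_cpu_data keeps the kernel address of page_frame; the
 * vDSO is expected to reach the same page via access-register translation
 * along this chain.
 */
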
void vdso_free_per_cpu(struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
                return;

        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
        unsigned long cr5;

        if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
                return;
        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

#ifdef CONFIG_64BIT
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
#endif

        /*
         * The vDSO had a problem and was disabled; just don't "enable"
         * it for this process.
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * Pick a base address for the vDSO in process space. We try to put
         * it at vdso_base, which is the "natural" base for it, but we might
         * fail and end up putting it elsewhere.
         */
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Put the vDSO base into the mm struct. We need to do this before
         * calling install_special_mapping, or the perf counter mmap tracking
         * code will fail to recognise it as a vDSO (since arch_vma_name fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to those pages.
         * gdb can break that via the ptrace interface and thus trigger COW
         * on those pages, but it is then your responsibility never to do
         * that on the "data" page of the vDSO, or you'll stop getting kernel
         * updates and your nice userland gettimeofday will be totally dead.
         * It's fine to use that for setting breakpoints in the vDSO code
         * pages, though.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                     vdso_pagelist);
        if (rc)
                current->mm->context.vdso_base = 0;
out_up:
        up_write(&mm->mmap_sem);
        return rc;
}

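/*
 * The vdso_base stored above is handed to user space through the ELF
 * auxiliary vector (AT_SYSINFO_EHDR; on s390 this entry is emitted by
 * ARCH_DLINFO in the arch elf.h).  A minimal user-space sketch of how a
 * program could locate the mapping, assuming a libc that provides
 * <sys/auxv.h>:
 *
 *      #include <stdio.h>
 *      #include <sys/auxv.h>
 *
 *      int main(void)
 *      {
 *              unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *              printf("vDSO mapped at %#lx\n", vdso);
 *              return 0;
 *      }
 *
 * getauxval() returns 0 when no vDSO was mapped (e.g. after booting with
 * "vdso=0").
 */
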
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
        /* Calculate the size of the 32 bit vDSO */
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
        if (vdso_alloc_per_cpu(&S390_lowcore))
                BUG();
        vdso_init_cr5();
#endif /* CONFIG_64BIT */

        get_page(virt_to_page(vdso_data));

        smp_wmb();

        return 0;
}
early_initcall(vdso_init);

int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}