privcmd.c

/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

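/* Forward a hypercall request from userspace directly to the hypervisor. */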
static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}

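/* Free every page on @pages and leave the list empty for reuse. */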
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;

                pageidx += size;
        }

        return ret;
}

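/* State carried across the chunks of one IOCTL_PRIVCMD_MMAP request. */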
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

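/* Validate one privcmd_mmap_entry and map its machine frames into the VMA. */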
static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

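/* IOCTL_PRIVCMD_MMAP: map a list of foreign machine frame ranges into the
 * calling process's address space. */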
static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        if (!xen_initial_domain())
                return -EPERM;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto out_up;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}

struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        /* An array for individual errors */
        int *err;

        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
};

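/* Map a single frame for the current batch entry and record its result. */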
static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        int ret;

        ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                         st->vma->vm_page_prot, st->domain);

        /* Store error code for second pass. */
        *(st->err++) = ret;

        /* And see if it affects the global_error. */
        if (ret < 0) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += PAGE_SIZE;

        return 0;
}

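/* Second pass for V1 callers: fold each per-frame error back into the
 * corresponding slot of the user-supplied mfn array. */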
static int mmap_return_errors_v1(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        int err = *(st->err++);

        /*
         * V1 encodes the error codes in the 32bit top nibble of the
         * mfn (with its known limitations vis-a-vis 64 bit callers).
         */
        *mfnp |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
        return __put_user(*mfnp, st->user_mfn++);
}

static struct vm_operations_struct privcmd_vm_ops;

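/* IOCTL_PRIVCMD_MMAPBATCH and _V2: map an arbitrary list of frames into the
 * caller's VMA and report per-frame errors back to userspace. */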
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        int *err_array = NULL;
        struct mmap_batch_state state;

        if (!xen_initial_domain())
                return -EPERM;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
        if (err_array == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        ret = -EINVAL;
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
                goto out;
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.global_error = 0;
        state.err = err_array;

        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
                              &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error && (version == 1)) {
                /* Write back errors in second pass. */
                state.user_mfn = (xen_pfn_t *)m.arr;
                state.err = err_array;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                                     &pagelist, mmap_return_errors_v1, &state);
        } else if (version == 2) {
                ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
                if (ret)
                        ret = -EFAULT;
        }

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        kfree(err_array);
        free_page_list(&pagelist);

        return ret;
}

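/* Top-level ioctl dispatcher for the privcmd device node (xen/privcmd). */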
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

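/* Mappings are installed eagerly by the ioctls above, so a fault here means
 * the caller touched an address that was never successfully mapped. */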
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}

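/* Succeed exactly once per VMA: the xchg() marks vm_private_data so that each
 * VMA can only ever receive one privcmd mapping. */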
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
        return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                printk(KERN_ERR "Could not register Xen privcmd device\n");
                return err;
        }
        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);