/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
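
/*
 * IOCTL_PRIVCMD_HYPERCALL: pass a hypercall from userspace (typically the
 * toolstack) straight to the hypervisor.  The argument is a struct
 * privcmd_hypercall carrying the hypercall number and up to five arguments.
 */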
static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}
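
/* Free every page on the list and reinitialise the list head. */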
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}
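
/* State for the traverse_pages() callback used by IOCTL_PRIVCMD_MMAP. */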
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}
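
/*
 * IOCTL_PRIVCMD_MMAP: map a userspace-supplied list of privcmd_mmap_entry
 * ranges of foreign machine frames into a previously mmap()ed VMA.
 */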
static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto out_up;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}
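
/*
 * State shared between the two traverse_pages() passes of
 * IOCTL_PRIVCMD_MMAPBATCH and IOCTL_PRIVCMD_MMAPBATCH_V2.
 */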
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then mfn is
 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page *cur_page = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_page = pages[st->index++];

        ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                         st->vma->vm_page_prot, st->domain,
                                         &cur_page);

        /* Store error code for second pass. */
        if (st->version == 1) {
                if (ret < 0) {
                        /*
                         * V1 encodes the error codes in the 32bit top nibble
                         * of the mfn (with its known limitations vis-a-vis
                         * 64 bit callers).
                         */
                        *mfnp |= (ret == -ENOENT) ?
                                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                }
        } else { /* st->version == 2 */
                *((int *) mfnp) = ret;
        }

        /* And see if it affects the global_error. */
        if (ret < 0) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += PAGE_SIZE;

        return 0;
}
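
/*
 * Second pass: copy the per-frame status gathered by mmap_batch_fn() back
 * to userspace - the whole mfn with error bits ORed in for V1, the plain
 * error code for V2.  Only failed slots are written.
 */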
static int mmap_return_errors(void *data, void *state)
{
        struct mmap_batch_state *st = state;

        if (st->version == 1) {
                xen_pfn_t mfnp = *((xen_pfn_t *) data);
                if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
                        return __put_user(mfnp, st->user_mfn++);
                else
                        st->user_mfn++;
        } else { /* st->version == 2 */
                int err = *((int *) data);
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages, 0);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
        vma->vm_private_data = pages;
        return 0;
}

static struct vm_operations_struct privcmd_vm_ops;
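
/*
 * IOCTL_PRIVCMD_MMAPBATCH(_V2): map a batch of foreign frames into a
 * privcmd VMA in one go, then report per-frame failures back to
 * userspace in a second pass over the gathered array.
 */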
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
                ret = -EINVAL;
                goto out;
        }
        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                ret = alloc_empty_pages(vma, m.num);
                if (ret < 0) {
                        up_write(&mm->mmap_sem);
                        goto out;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
                              &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_mfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                                     &pagelist, mmap_return_errors, &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);

        return ret;
}
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
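
/*
 * For auto-translated guests the VMA holds the ballooned pages backing the
 * foreign mapping; hand them back when the mapping goes away.
 */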
static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        xen_unmap_domain_mfn_range(vma, numpgs, pages);
        free_xenballooned_pages(numpgs, pages);
        kfree(pages);
}
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}
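
/*
 * Claim the VMA for a single mapping operation: atomically flip
 * vm_private_data from NULL to PRIV_VMA_LOCKED, so only the first
 * caller succeeds and a VMA can never be mapped into twice.
 */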
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
        return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                printk(KERN_ERR "Could not register Xen privcmd device\n");
                return err;
        }

        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);