/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

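/*
 * IOCTL_PRIVCMD_HYPERCALL: copy the hypercall descriptor from userspace
 * and forward the five-argument hypercall to the hypervisor.
 */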
static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}

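/* Free every page on the list and re-initialise the (now empty) list head. */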
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}

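/* Per-call state threaded through traverse_pages() for IOCTL_PRIVCMD_MMAP. */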
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

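/*
 * Map one contiguous chunk of foreign frames described by a
 * privcmd_mmap_entry, after checking that it neither wraps the address
 * space nor strays outside the VMA.
 */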
static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

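/*
 * IOCTL_PRIVCMD_MMAP: gather the privcmd_mmap_entry array from userspace
 * and map each chunk of foreign frames into the caller's VMA.
 */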
static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}

struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then mfn is
 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page *cur_page = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_page = pages[st->index++];

        ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                         st->vma->vm_page_prot, st->domain,
                                         &cur_page);

        /* Store error code for second pass. */
        if (st->version == 1) {
                if (ret < 0) {
                        /*
                         * V1 encodes the error codes in the 32bit top nibble of the
                         * mfn (with its known limitations vis-a-vis 64 bit callers).
                         */
                        *mfnp |= (ret == -ENOENT) ?
                                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                }
        } else { /* st->version == 2 */
                *((int *) mfnp) = ret;
        }

        /* And see if it affects the global_error. */
        if (ret < 0) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += PAGE_SIZE;

        return 0;
}

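/*
 * Second pass: copy the per-frame error codes recorded by mmap_batch_fn()
 * back to userspace, into the mfn array (V1) or the error array (V2).
 */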
static int mmap_return_errors(void *data, void *state)
{
        struct mmap_batch_state *st = state;

        if (st->version == 1) {
                xen_pfn_t mfnp = *((xen_pfn_t *) data);
                if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
                        return __put_user(mfnp, st->user_mfn++);
                else
                        st->user_mfn++;
        } else { /* st->version == 2 */
                int err = *((int *) data);
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}

/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 on success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages, 0);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

        return 0;
}

static struct vm_operations_struct privcmd_vm_ops;

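/*
 * IOCTL_PRIVCMD_MMAPBATCH and IOCTL_PRIVCMD_MMAPBATCH_V2: map a batch of
 * foreign frames, one frame per page, recording per-frame errors for the
 * second pass that reports them back to userspace.
 */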
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, m.num);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
                              &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_mfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                                     &pagelist, mmap_return_errors, &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);
        return ret;

out_unlock:
        up_write(&mm->mmap_sem);
        goto out;
}

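/*
 * Dispatch the privcmd ioctls to their handlers.
 *
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted): a toolstack component opens the device node and issues, for
 * example, a hypercall ioctl:
 *
 *      struct privcmd_hypercall call = {
 *              .op  = __HYPERVISOR_xen_version,
 *              .arg = { XENVER_version, 0, },
 *      };
 *      int fd = open("/dev/xen/privcmd", O_RDWR);
 *      long xen_version = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */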
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

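/*
 * On auto-translated setups, unmap the foreign frames and return the
 * ballooned-out pages backing the VMA when it is torn down; in all other
 * cases there is nothing to undo here.
 */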
static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        xen_unmap_domain_mfn_range(vma, numpgs, pages);
        free_xenballooned_pages(numpgs, pages);
        kfree(pages);
}

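/*
 * Mappings are only ever established via the ioctls above, so a fault on
 * a privcmd VMA means userspace touched an unmapped hole: log it and
 * deliver SIGBUS.
 */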
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
{
        return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages)
{
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

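/* Register the misc device (/dev/xen/privcmd) when running on Xen. */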
static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);