/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>

#define REMAP_BATCH_SIZE 16

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
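
/*
 * Per-call state for remap_domain_mfn_range(): the next machine frame
 * to map, the page protection to apply, and a cursor into the current
 * batch of mmu_update requests being assembled.
 */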
struct remap_data {
        unsigned long mfn;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

        rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}
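
/*
 * Map @nr contiguous machine frames starting at @mfn into @vma at @addr.
 * PTEs are assembled REMAP_BATCH_SIZE at a time via apply_to_page_range()
 * and then handed to Xen in a single HYPERVISOR_mmu_update() call, so the
 * hypercall cost is amortized over each batch.
 */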
static int remap_domain_mfn_range(struct vm_area_struct *vma,
                                  unsigned long addr,
                                  unsigned long mfn, int nr,
                                  pgprot_t prot, unsigned domid)
{
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        int batch;
        unsigned long range;
        int err = 0;

        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

        rmd.mfn = mfn;
        rmd.prot = prot;

        while (nr) {
                batch = min(REMAP_BATCH_SIZE, nr);
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                err = -EFAULT;
                if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
                        goto out;

                nr -= batch;
                addr += range;
        }

        err = 0;
out:
        flush_tlb_all();
        return err;
}

static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}
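
/*
 * For illustration only (not part of this file): userspace reaches this
 * handler through the privcmd character device.  A minimal sketch, assuming
 * the device node is exposed at /proc/xen/privcmd; the ioctl returns the
 * hypercall's own return value:
 *
 *      struct privcmd_hypercall call = {
 *              .op  = __HYPERVISOR_xen_version,
 *              .arg = { XENVER_version, 0, 0, 0, 0 },
 *      };
 *      int fd = open("/proc/xen/privcmd", O_RDWR);
 *      int ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */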

static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;

                pageidx += size;
        }

        return ret;
}
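
/*
 * Per-call state for the IOCTL_PRIVCMD_MMAP path: the next expected
 * virtual address, the VMA being populated, and the domain whose
 * frames are being mapped.
 */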
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = remap_domain_mfn_range(vma,
                                    msg->va & PAGE_MASK,
                                    msg->mfn, msg->npages,
                                    vma->vm_page_prot,
                                    st->domain);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        if (!xen_initial_domain())
                return -EPERM;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto out_up;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}
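
/*
 * Per-call state for the IOCTL_PRIVCMD_MMAPBATCH path.  @err counts
 * frames that failed to map; @user is the cursor used when the
 * annotated mfn array is copied back to the caller.
 */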
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int err;

        xen_pfn_t __user *user;
};

static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;

        if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
                                   *mfnp, 1,
                                   st->vma->vm_page_prot, st->domain) < 0) {
                *mfnp |= 0xf0000000U;
                st->err++;
        }
        st->va += PAGE_SIZE;

        return 0;
}
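
/*
 * Frames that failed in mmap_batch_fn() were tagged in place by OR-ing
 * 0xf0000000U into the corresponding array slot; this pass copies the
 * annotated array back so userspace can see which entries failed.
 */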
static int mmap_return_errors(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;

        put_user(*mfnp, st->user++);

        return 0;
}

static struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata)
{
        int ret;
        struct privcmd_mmapbatch m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        if (!xen_initial_domain())
                return -EPERM;

        if (copy_from_user(&m, udata, sizeof(m)))
                return -EFAULT;

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
                           m.arr);

        if (ret || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        ret = -EINVAL;
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
                goto out;
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.err = 0;

        ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                             &pagelist, mmap_batch_fn, &state);

        up_write(&mm->mmap_sem);

        if (state.err > 0) {
                ret = 0;
                state.user = m.arr;
                traverse_pages(m.num, sizeof(xen_pfn_t),
                               &pagelist,
                               mmap_return_errors, &state);
        }

out:
        free_page_list(&pagelist);

        return ret;
}
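
/*
 * For illustration only (not part of this file): a toolstack maps foreign
 * frames by first mmap()ing the privcmd device to reserve a VMA, then
 * issuing the batch ioctl against that range.  A minimal sketch, assuming
 * a /proc/xen/privcmd device node and a populated xen_pfn_t mfn[] array:
 *
 *      void *addr = mmap(NULL, npages << PAGE_SHIFT, PROT_READ|PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      struct privcmd_mmapbatch batch = {
 *              .num  = npages,
 *              .dom  = domid,
 *              .addr = (unsigned long)addr,
 *              .arr  = mfn,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch);
 */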

static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
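/*
 * All privcmd mappings are established eagerly by the ioctls above, so a
 * fault on a privcmd VMA means userspace touched a page that was never
 * successfully mapped; report it rather than trying to fill the mapping.
 */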
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Unsupported for auto-translate guests. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}
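
/*
 * Atomically claim the VMA: xchg() returns the previous value of
 * vm_private_data, so only the first caller sees NULL and wins.  This is
 * what restricts each privcmd VMA to a single mapping operation.
 */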
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
        return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations privcmd_file_ops = {
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};