/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}

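/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * driving IOCTL_PRIVCMD_HYPERCALL, assuming the device node is exposed as
 * /dev/xen/privcmd and that the privcmd_hypercall structure and ioctl number
 * above are visible to userspace.  It issues HYPERVISOR_xen_version with
 * cmd XENVER_version, one of the few hypercalls with no buffers to marshal;
 * on success the return value is (major << 16) | minor.
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */
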
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

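/*
 * For a sense of scale (illustrative numbers, assuming 4 KiB pages and an
 * 8-byte xen_pfn_t): gather_array() packs PAGE_SIZE / size = 512 entries
 * into each page, so copying 1000 frame numbers from userspace allocates
 * two pages, the second one only partially used.  traverse_pages() below
 * walks the same layout back in the same order.
 */
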
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;

		pageidx += size;
	}

	return ret;
}

struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

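/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the IOCTL_PRIVCMD_MMAP flow, assuming a privcmd file descriptor fd and a
 * (dom, mfn) pair obtained elsewhere.  The caller first mmap()s a window of
 * the privcmd device, then asks the driver to back exactly that range with
 * the foreign frame:
 *
 *	void *addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va     = (unsigned long)addr,
 *		.mfn    = mfn,
 *		.npages = 1,
 *	};
 *	struct privcmd_mmap cmd = {
 *		.num   = 1,
 *		.dom   = dom,
 *		.entry = &ent,
 *	};
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 *
 * Per the checks above, the entry's va must equal vma->vm_start and the
 * whole range must stay inside the vma.
 */
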
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};

static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		/* Tag the failed frame in place so userspace can spot it. */
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}

static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}

static struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}

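/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * IOCTL_PRIVCMD_MMAPBATCH under the same assumptions as the example above.
 * Unlike IOCTL_PRIVCMD_MMAP, the batch variant maps one page per array
 * entry and reports per-page failures by setting the 0xf0000000 bits in the
 * corresponding entries of the caller's array:
 *
 *	xen_pfn_t frames[2] = { mfn0, mfn1 };
 *	void *addr = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch m = {
 *		.num  = 2,
 *		.dom  = dom,
 *		.addr = (unsigned long)addr,
 *		.arr  = frames,
 *	};
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m);
 *
 * The mapped window must cover the batch exactly: m.addr has to equal
 * vma->vm_start and m.addr + num * PAGE_SIZE has to equal vma->vm_end.
 */
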
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
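/*
 * Mappings in a privcmd VMA are installed up front by the ioctls above, so
 * a fault here means userspace touched a part of the VMA that was never
 * populated (or whose mapping failed): log it and deliver SIGBUS instead of
 * trying to fault anything in.
 */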
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translate guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

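/*
 * vm_private_data doubles as a "has this VMA been populated?" flag:
 * privcmd_mmap() clears it, and the atomic xchg() lets exactly one
 * mmap/mmapbatch ioctl claim the VMA, so a region can only be filled
 * with foreign mappings once.
 */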
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};