/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};
/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	    /* User VA. */
	u64 consume_page_file;	    /* User VA. */
	u64 produce_page_file_size; /* Size of the file name array. */
	u64 consume_page_file_size; /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};
/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}
/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}
/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented
		 * when a context is created through the
		 * IOCTL_VMCI_INIT_CONTEXT ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}
/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	unsigned int mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = POLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}
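
/*
 * Illustrative userspace sketch (not part of this driver): a VMX-like
 * client can block until a datagram or doorbell notification is pending
 * by polling its /dev/vmci file descriptor. The vmci_fd variable and
 * the timeout value are assumptions for the example.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = vmci_fd, .events = POLLIN };
 *	int n = poll(&pfd, 1, 1000);	// wait up to one second
 *	if (n > 0 && (pfd.revents & POLLIN)) {
 *		// at least one datagram or doorbell is pending; fetch
 *		// it with IOCTL_VMCI_DATAGRAM_RECEIVE or the
 *		// notifications ioctl
 *	}
 */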
/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}
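
/*
 * Illustrative in-kernel caller sketch (assumed variable names; not part
 * of this driver): the three outcomes of drv_cp_harray_to_user() must be
 * handled separately, since a user-copy fault is reported through *retval
 * rather than through the VMCI status code.
 *
 *	int retval = 0;
 *	int status = drv_cp_harray_to_user(ubuf, &buf_size, arr, &retval);
 *	if (status == VMCI_ERROR_MORE_DATA)
 *		;	// buffer too small; buf_size left untouched
 *	else if (retval != 0)
 *		;	// copy_to_user() faulted; usually becomes -EFAULT
 *	else
 *		;	// success; buf_size now holds the copied length
 */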
/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean at the given user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	struct page *page;
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we're
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock the physical page backing the given user VA. Note that
	 * the address must be rounded *down* to the containing page;
	 * PAGE_ALIGN() would round up and pin the page after a
	 * non-page-aligned uva.
	 */
	down_read(&current->mm->mmap_sem);
	retval = get_user_pages(current, current->mm,
				uva & PAGE_MASK,
				1, 1, 0, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (retval != 1)
		return VMCI_ERROR_GENERIC;

	context->notify_page = page;

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}
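
/*
 * Illustrative userspace sketch (assumed variable names; not part of this
 * driver): the VMX registers one byte of its own memory through
 * IOCTL_VMCI_SET_NOTIFY, and the driver then writes that byte from kernel
 * space whenever a notification is pending, letting the VMX test it
 * without a system call.
 *
 *	volatile unsigned char notify = 0;
 *	struct vmci_set_notify_info info = {
 *		.notify_uva = (unsigned long)&notify,
 *	};
 *	if (ioctl(vmci_fd, IOCTL_VMCI_SET_NOTIFY, &info) == 0 &&
 *	    info.result == VMCI_SUCCESS) {
 *		// cheap check in the VMX main loop:
 *		if (notify) { ... }
 *	}
 */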
static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;

		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0 tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that the VMX and VMCI kernel module were
	 * version sync'd. All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */
	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}
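
/*
 * Illustrative userspace sketch (not part of this driver): a client
 * announces its version with IOCTL_VMCI_VERSION2 and then uses whatever
 * the driver returns, which implements the negotiation described above:
 * an old (pre-VMCI_VERSION_HOSTQP) caller gets its own version echoed
 * back, everyone else gets the driver's VMCI_VERSION.
 *
 *	int my_version = VMCI_VERSION;
 *	int negotiated = ioctl(vmci_fd, IOCTL_VMCI_VERSION2, &my_version);
 *	if (negotiated < 0)
 *		;	// the ioctl itself failed
 */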
#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = kmalloc(send_info.len, GFP_KERNEL);
	if (!dg) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return -ENOMEM;
	}

	if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
			   send_info.len)) {
		vmci_ioctl_err("error getting datagram\n");
		kfree(dg);
		return -EFAULT;
	}

	/*
	 * The payload size is user-controlled; reject datagrams whose
	 * declared size exceeds the data actually copied in, so that
	 * dispatch never reads past the allocation.
	 */
	if (VMCI_DG_SIZE(dg) > send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;

		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller. Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns. And then,
	 * only if the ioctl() result indicates no error. We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms. For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (anymore)
	 * data into the queue_pair. That means an additional test in the
	 * VMCI_Enqueue() code path. Ugh.
	 */
	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block. So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed). So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't. Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;

		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);
		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
	if (!cpt_buf) {
		vmci_ioctl_err(
			"cannot allocate memory to set cpt state (type=%d)\n",
			set_info.cpt_type);
		return -ENOMEM;
	}

	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
			   set_info.buf_size)) {
		retval = -EFAULT;
		goto out;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

out:
	kfree(cpt_buf);
	return retval;
}
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;

			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
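
/*
 * Illustrative userspace sketch (assumed handle value; not part of this
 * driver): doorbell creation, notification, and destruction all go
 * through the same IOCTL_VMCI_NOTIFY_RESOURCE entry point, selected by
 * the action field.
 *
 *	struct vmci_dbell_notify_resource_info info = {
 *		.handle = db_handle,
 *		.resource = VMCI_NOTIFY_RESOURCE_DOOR_BELL,
 *		.action = VMCI_NOTIFY_RESOURCE_ACTION_CREATE,
 *	};
 *	ioctl(vmci_fd, IOCTL_VMCI_NOTIFY_RESOURCE, &info);
 *	// later, notify the doorbell:
 *	info.action = VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY;
 *	ioctl(vmci_fd, IOCTL_VMCI_NOTIFY_RESOURCE, &info);
 */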
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}
static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}
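
/*
 * Illustrative userspace sketch (assumed buffer variables; not part of
 * this driver): every request above is issued as a plain ioctl() on
 * /dev/vmci, with a per-command info structure whose result field
 * carries the VMCI status back separately from the ioctl() return value.
 *
 *	struct vmci_datagram_snd_rcv_info info = {
 *		.addr = (unsigned long)dg_buf, // a marshalled vmci_datagram
 *		.len = dg_len,
 *	};
 *	if (ioctl(vmci_fd, IOCTL_VMCI_DATAGRAM_SEND, &info) == 0 &&
 *	    info.result >= VMCI_SUCCESS) {
 *		// the datagram was dispatched
 *	}
 */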
static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}
void __exit vmci_host_exit(void)
{
	int error;

	vmci_host_device_initialized = false;

	error = misc_deregister(&vmci_host_miscdev);
	if (error)
		pr_warn("Error unregistering character device: %d\n", error);

	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}