net.c

/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <net/sock.h>

#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
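/* 0x80000 bytes is 512 KiB; with a 1500-byte MTU that is on the order of
 * a few hundred packets per pass before the handler requeues itself. */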

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};
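
/* TX poll-state transitions, as driven by the code below: on backend
 * attach, vhost_net_enable_vq() moves DISABLED -> STOPPED and immediately
 * calls tx_poll_start() (-> STARTED); handle_tx() then flips between
 * STARTED and STOPPED as the socket send buffer fills and drains; and
 * vhost_net_disable_vq() returns the state to DISABLED on detach. */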

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;
	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}
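
/* Illustrative example (not part of the original source): with
 * from[] = {{p, 6}, {q, 8}} and len = 10, the loop produces
 * to[] = {{p, 6}, {q, 4}}, leaves from[] = {{p + 6, 0}, {q + 4, 4}},
 * and returns 2: the 10-byte header now lives in 'to' while 'from'
 * holds only the remaining payload. */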

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}
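
/* Overview of handle_tx() below: each pass pulls descriptors off the TX
 * ring, strips the virtio-net header into vq->hdr, and pushes the payload
 * into the backend socket with a non-blocking sendmsg().  The
 * sk_wmem_alloc checks are a backpressure heuristic: when the send buffer
 * is near full we stop pulling from the ring and instead poll the socket
 * for POLLOUT. */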
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned head, out, in, s;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf)
		return;

	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->hdr_size;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(vq))) {
				vhost_disable_notify(vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq);
			tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_err("Truncated TX packet: len %d != %zd\n",
			       err, len);
		vhost_add_used_and_signal(&net->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}
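
/* handle_rx() below mirrors handle_tx(), with two differences: the
 * descriptors must be writable ("in") rather than readable, and a blank
 * virtio_net_hdr is prepended to each packet copied up to the guest.  It
 * also logs its writes when VHOST_F_LOG_ALL is negotiated; handle_tx()
 * passes no log since its data path never writes into guest buffers. */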
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned head, out, in, log, s;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
		return;

	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);
	hdr_size = vq->hdr_size;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 vq_log, &log);
		/* OK, now we need to know about added descriptors. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (out) {
			vq_err(vq, "Unexpected descriptor format for RX: "
			       "out %d, in %d\n",
			       out, in);
			break;
		}
		/* Skip header. TODO: support TSO/mergeable rx buffers. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
		msg.msg_iovlen = in;
		len = iov_length(vq->iov, in);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for RX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 len, MSG_DONTWAIT | MSG_TRUNC);
		/* TODO: Check specific error and bomb out unless EAGAIN? */
		if (err < 0) {
			vhost_discard_vq_desc(vq);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (err > len) {
			pr_err("Discarded truncated rx packet: "
			       "len %d > %zd\n", err, len);
			vhost_discard_vq_desc(vq);
			continue;
		}
		len = err;
		err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
		if (err) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
			       vq->iov->iov_base, err);
			break;
		}
		len += hdr_size;
		vhost_add_used_and_signal(&net->dev, vq, head, len);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, len);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}
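
/* Two entry points feed each virtqueue handler: the *_kick variants run
 * when the guest kicks us through the vq's eventfd, while the *_net
 * variants run when the backend socket signals readiness (POLLIN/POLLOUT)
 * via vhost_poll.  Both funnel into the same handle_tx()/handle_rx(). */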
static void handle_tx_kick(struct work_struct *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_net *net;
	vq = container_of(work, struct vhost_virtqueue, poll.work);
	net = container_of(vq->dev, struct vhost_net, dev);
	handle_tx(net);
}

static void handle_rx_kick(struct work_struct *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_net *net;
	vq = container_of(work, struct vhost_virtqueue, poll.work);
	net = container_of(vq->dev, struct vhost_net, dev);
	handle_rx(net);
}

static void handle_tx_net(struct work_struct *work)
{
	struct vhost_net *net;
	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct work_struct *work)
{
	struct vhost_net *net;
	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	int r;
	if (!n)
		return -ENOMEM;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}
	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	f->private_data = n;
	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock = vq->private_data;
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}
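
/* Teardown order in vhost_net_release() below matters: detach the sockets
 * first so no new work can reference them, flush any handler still in
 * flight, tear down the device, and only then drop the socket files. */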
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);
	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}
	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;
	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;
	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}
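
/* get_socket() tries the supported backends in turn: an AF_PACKET raw
 * socket first, then a tun/tap or macvtap file.  An fd of -1 is the
 * special case that detaches the current backend. */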
static struct socket *get_socket(int fd)
{
	struct socket *sock;
	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);
	}
	mutex_unlock(&vq->mutex);

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}
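
/* When VHOST_NET_F_VIRTIO_NET_HDR is acked, vhost itself consumes the
 * vnet header: handle_tx() strips it and handle_rx() writes a blank one,
 * so the backend socket sees and supplies raw packets.  With the bit
 * clear, hdr_size is 0 and the header passes straight through. */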
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
		sizeof(struct virtio_net_hdr) : 0;
	int i;
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].hdr_size = hdr_size;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
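
/* Illustrative userspace sequence (a sketch, not part of this file;
 * tap_fd is assumed to be an already-open tun/tap descriptor):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_GET_FEATURES, &features);
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);  // ack a subset
 *	// ... VHOST_SET_VRING_* setup for each ring elided ...
 *	struct vhost_vring_file backend = { .index = VHOST_NET_VQ_TX,
 *					    .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 */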
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		/* copy_from_user() returns the number of bytes left to
		 * copy, never a negative errno. */
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner = THIS_MODULE,
	.release = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_net_compat_ioctl,
#endif
	.open = vhost_net_open,
};

static struct miscdevice vhost_net_misc = {
	VHOST_NET_MINOR,
	"vhost-net",
	&vhost_net_fops,
};

static int vhost_net_init(void)
{
	int r = vhost_init();
	if (r)
		goto err_init;
	r = misc_register(&vhost_net_misc);
	if (r)
		goto err_reg;
	return 0;
err_reg:
	vhost_cleanup();
err_init:
	return r;
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
	vhost_cleanup();
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");