net.c

/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <net/sock.h>

#include "vhost.h"
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
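/* 0x80000 is 512 KiB, so with full-size 1500-byte Ethernet frames a
 * handler yields after roughly 350 packets per pass. */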
enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
        VHOST_NET_POLL_DISABLED = 0,
        VHOST_NET_POLL_STARTED = 1,
        VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Tells us whether we are polling a socket for TX.
         * We only do this when socket buffer fills up.
         * Protected by tx vq lock. */
        enum vhost_net_poll_state tx_poll_state;
};
/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
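/* Worked example: with from[] = { {p, 4}, {q, 1500} } and len = 10, the
 * first segment is consumed whole and 6 bytes are taken from the second,
 * so 'to' receives two segments totalling 10 bytes, from[1] is advanced
 * by 6 bytes, and the function returns 2. */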
/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
        if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
                return;
        vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
        net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
                return;
        vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
        net->tx_poll_state = VHOST_NET_POLL_STARTED;
}
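/* TX socket polling is a three-state machine: DISABLED while the vq has
 * no backend, and toggling between STARTED and STOPPED while it does.
 * Polling is only started when the socket's send buffer fills up, so
 * that POLLOUT wakeups resume transmission; once there is room again,
 * handle_tx() stops polling and relies on guest kicks alone. */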
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
        unsigned head, out, in, s;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err, wmem;
        size_t hdr_size;
        struct socket *sock = rcu_dereference(vq->private_data);

        if (!sock)
                return;

        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
        if (wmem >= sock->sk->sk_sndbuf) {
                /* No room to send: poll the socket for POLLOUT so that
                 * transmission resumes when space frees up. */
                mutex_lock(&vq->mutex);
                tx_poll_start(net, sock);
                mutex_unlock(&vq->mutex);
                return;
        }

        use_mm(net->dev.mm);
        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);

        /* Less than half full: no need to keep polling for POLLOUT. */
        if (wmem < sock->sk->sk_sndbuf / 2)
                tx_poll_stop(net);
        hdr_size = vq->hdr_size;

        for (;;) {
                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
                        if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        if (unlikely(vhost_enable_notify(vq))) {
                                vhost_disable_notify(vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        vhost_discard_vq_desc(vq);
                        tx_poll_start(net, sock);
                        break;
                }
                if (err != len)
                        pr_err("Truncated TX packet: len %d != %zd\n",
                               err, len);
                vhost_add_used_and_signal(&net->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
        unuse_mm(net->dev.mm);
}
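/* The send-buffer thresholds in handle_tx() form a hysteresis band:
 * POLLOUT polling stops only when the socket is less than half full and
 * restarts once write memory climbs past three quarters of sk_sndbuf,
 * so the handler does not flap between the two states on every packet. */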
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
        unsigned head, out, in, log, s;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        struct virtio_net_hdr hdr = {
                .flags = 0,
                .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t len, total_len = 0;
        int err;
        size_t hdr_size;
        struct socket *sock = rcu_dereference(vq->private_data);

        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                return;

        use_mm(net->dev.mm);
        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);
        hdr_size = vq->hdr_size;

        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;

        for (;;) {
                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         vq_log, &log);
                /* OK, now we need to know about added descriptors. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(vq);
                                continue;
                        }
                        /* Nothing new?  Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (out) {
                        vq_err(vq, "Unexpected descriptor format for RX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO/mergeable rx buffers. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
                msg.msg_iovlen = in;
                len = iov_length(vq->iov, in);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for RX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         len, MSG_DONTWAIT | MSG_TRUNC);
                /* TODO: Check specific error and bomb out unless EAGAIN? */
                if (err < 0) {
                        vhost_discard_vq_desc(vq);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (err > len) {
                        pr_err("Discarded truncated rx packet: len %d > %zd\n",
                               err, len);
                        vhost_discard_vq_desc(vq);
                        continue;
                }
                len = err;
                err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
                if (err) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
                               vq->iov->iov_base, err);
                        break;
                }
                len += hdr_size;
                vhost_add_used_and_signal(&net->dev, vq, head, len);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, len);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
        unuse_mm(net->dev.mm);
}
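/* Each virtqueue has two entry points into the handlers above: a "kick"
 * work item queued when the guest signals the vq's eventfd, and a "net"
 * work item queued by the backend socket's poll mask (POLLIN for RX,
 * POLLOUT for TX). Both funnel into the same handle_tx()/handle_rx(). */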
static void handle_tx_kick(struct work_struct *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_net *net;

        vq = container_of(work, struct vhost_virtqueue, poll.work);
        net = container_of(vq->dev, struct vhost_net, dev);
        handle_tx(net);
}

static void handle_rx_kick(struct work_struct *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_net *net;

        vq = container_of(work, struct vhost_virtqueue, poll.work);
        net = container_of(vq->dev, struct vhost_net, dev);
        handle_rx(net);
}

static void handle_tx_net(struct work_struct *work)
{
        struct vhost_net *net;

        net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct work_struct *work)
{
        struct vhost_net *net;

        net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
        int r;

        if (!n)
                return -ENOMEM;
        n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
        r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
        n->tx_poll_state = VHOST_NET_POLL_DISABLED;

        f->private_data = n;
        return 0;
}
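/* Each open of the misc device creates an independent vhost_net
 * instance; all further configuration happens through ioctls on the
 * returned file descriptor, and vhost_net_release() tears it down. */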
static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        if (!vq->private_data)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                tx_poll_stop(n);
                n->tx_poll_state = VHOST_NET_POLL_DISABLED;
        } else
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
{
        struct socket *sock = vq->private_data;

        if (!sock)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                /* tx_poll_start() is a no-op unless the state is STOPPED,
                 * so leave DISABLED explicitly before starting. */
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
                tx_poll_start(n, sock);
        } else
                vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = vq->private_data;
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
        *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}
static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_cleanup(&n->dev);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n);
        return 0;
}
static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        fput(sock->file);
        return ERR_PTR(r);
}

static struct socket *get_tun_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tun_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}
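/* A backend fd is thus accepted in one of two forms: a raw AF_PACKET
 * socket bound to a device, or a tun/tap file descriptor whose socket
 * is exposed via tun_get_socket(). Passing fd == -1 detaches the
 * current backend without attaching a new one. */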
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = n->vqs + index;
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = vq->private_data;
        if (sock == oldsock)
                goto done;

        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, sock);
        vhost_net_enable_vq(n, vq);
done:
        mutex_unlock(&vq->mutex);
        mutex_unlock(&n->dev.mutex);
        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }
        return r;

err_vq:
        /* Error paths taken after locking vq->mutex must drop it too,
         * not just the device mutex. */
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        err = vhost_dev_reset_owner(&n->dev);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        return err;
}
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
                sizeof(struct virtio_net_hdr) : 0;
        int i;

        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].mutex);
                n->vqs[i].hdr_size = hdr_size;
                mutex_unlock(&n->vqs[i].mutex);
        }
        vhost_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}
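/* The smp_wmb() publishes acked_features before the per-vq hdr_size
 * updates, so a handler that observes the new hdr_size also observes
 * the feature bits that selected it. */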
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                /* copy_from_user() returns the number of bytes left to
                 * copy, never a negative errno. */
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                return copy_to_user(featurep, &features, sizeof features) ?
                        -EFAULT : 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, arg);
                vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}
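/* A minimal sketch of how userspace drives this device, assuming a tap
 * fd is already open ('vhost' and 'tap_fd' below are illustrative names,
 * not part of this file):
 *
 *   int vhost = open("/dev/vhost-net", O_RDWR);
 *   ioctl(vhost, VHOST_SET_OWNER, 0);       // bind device to this process
 *   // ... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup, handled by
 *   // vhost_dev_ioctl() via the default case above ...
 *   struct vhost_vring_file backend = { .index = VHOST_NET_VQ_TX,
 *                                       .fd = tap_fd };
 *   ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 *   backend.index = VHOST_NET_VQ_RX;
 *   ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 */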
#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_net_compat_ioctl,
#endif
        .open           = vhost_net_open,
};

static struct miscdevice vhost_net_misc = {
        VHOST_NET_MINOR,
        "vhost-net",
        &vhost_net_fops,
};
static int vhost_net_init(void)
{
        int r = vhost_init();
        if (r)
                goto err_init;
        r = misc_register(&vhost_net_misc);
        if (r)
                goto err_reg;
        return 0;
err_reg:
        vhost_cleanup();
err_init:
        return r;
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
        vhost_cleanup();
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");