virtio_net.c

/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>	/* kzalloc()/kfree() in virtnet_set_rx_mode() */

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16-byte aligned after
	 * the virtio_net_hdr.
	 */
	char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}
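
/* TX completion callback: the host has consumed buffers from the send
 * virtqueue, so the queue (stopped when the ring filled up) can run
 * again. */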
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *f;

	f = &skb_shinfo(skb)->frags[i];
	f->size = min((unsigned)PAGE_SIZE - offset, *len);
	f->page_offset = offset;
	f->page = page;

	skb->data_len += f->size;
	skb->len += f->size;
	skb_shinfo(skb)->nr_frags++;
	*len -= f->size;
}
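
/* Build an skb from a chain of pages: copy the virtio header and up to
 * skb_tailroom() bytes of data into the linear area (small packets are
 * thus copied entirely, so their pages can be reused), attach whatever
 * remains as page fragments, and return any unused pages to vi->pages. */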
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}
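
/* With mergeable rx buffers the host may split one packet across
 * several page-sized buffers; num_buffers in the header says how many.
 * Pull the remaining buffers off the receive queue and attach them to
 * the skb as fragments. */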
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	unsigned int len;
	int num_buf, i;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}
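
/* Handle one completed receive buffer: turn it into an skb, validate
 * the virtio header (checksum offload, GSO) and hand the packet to the
 * stack. */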
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);
	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}
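
/* Post a single MAX_PACKET_LEN skb as a receive buffer: sg[0] carries
 * the virtio header (stored in skb->cb), sg[1] the packet data. */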
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	struct scatterlist sg[2];
	int err;

	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_init_table(sg, 2);	/* initialize on-stack sg (CONFIG_DEBUG_SG) */
	sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, sg + 1, 0, skb->len);

	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
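
/* Post a chain of MAX_SKB_FRAGS + 2 pages as one big receive buffer:
 * sg[0] holds the virtio header, sg[1] the rest of the first page, and
 * each remaining entry one full page, chained through page->private so
 * the whole buffer can be identified by its first page. */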
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(sg, MAX_SKB_FRAGS + 2);	/* initialize on-stack sg */

	/* page in sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* sg[0], sg[1] share the same page */
	/* a separate sg[0] for the virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));

	/* sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
				       first);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	struct scatterlist sg;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(&sg, page_address(page), PAGE_SIZE);

	err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom = false;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		if (err < 0) {
			oom = true;
			break;
		}
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;

	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	napi_enable(&vi->napi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}
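
/* NAPI poll: drain up to @budget received buffers, refill the ring once
 * it drops below half of its maximum fill, and re-enable interrupts only
 * when the ring is empty, re-polling if more buffers arrived meanwhile. */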
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}
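
/* Reclaim skbs the host has finished transmitting; returns the total
 * number of sg entries freed so the caller can judge ring capacity. */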
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}
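
/* Fill in the virtio header from skb metadata (checksum offload, GSO)
 * and queue the header plus data fragments on the send virtqueue.
 * Returns the ring capacity left after adding, or a negative errno. */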
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}
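
/* ndo_start_xmit: reclaim completed skbs, queue this one, and stop the
 * queue early if fewer than 2+MAX_SKB_FRAGS descriptors remain, since
 * the next packet could need that many. */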
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		netif_stop_queue(dev);
		dev_warn(&dev->dev, "Unexpected full queue\n");
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				vi->svq->vq_ops->disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(addr, dev)
		memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};
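
/* Read link status from config space (when the host offers
 * VIRTIO_NET_F_STATUS) and propagate carrier and queue state. */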
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}
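
/* Device probe: allocate the netdev, negotiate offload features with
 * the host, find the virtqueues, register the device and prime the
 * receive ring with buffers. */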
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	INIT_DELAYED_WORK(&vi->refill, refill_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);
	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free:
	free_netdev(dev);
	return err;
}
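
/* On device removal: detach any buffers still sitting in the
 * virtqueues and free them by type (skbs on the send queue, skbs or
 * page chains on the receive queue). */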
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;

	while (1) {
		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");