virtio_net.c

/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

#define VIRTNET_SEND_COMMAND_SG_MAX 2

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Send queue. */
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr hdr;
        /*
         * The virtio_net_hdr should be in a separate sg buffer because of a
         * QEMU bug, and the data sg buffer shares the same page with this
         * header sg.  This padding makes the next sg 16-byte aligned after
         * the virtio_net_hdr.
         */
        char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p) {
                vi->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}
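
/*
 * Callback from the virtio core when the host has used buffers on the
 * send queue: ring space may be available again, so restart the queue.
 */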
static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}
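
/*
 * Attach part of @page to @skb as a paged fragment, starting at @offset;
 * *len is reduced by however many bytes were attached.
 */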
static void set_skb_frag(struct sk_buff *skb, struct page *page,
                         unsigned int offset, unsigned int *len)
{
        int i = skb_shinfo(skb)->nr_frags;
        skb_frag_t *f;

        f = &skb_shinfo(skb)->frags[i];
        f->size = min((unsigned)PAGE_SIZE - offset, *len);
        f->page_offset = offset;
        f->page = page;

        skb->data_len += f->size;
        skb->len += f->size;
        skb_shinfo(skb)->nr_frags++;
        *len -= f->size;
}
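
/*
 * Build an skb from a chain of receive pages: copy the virtio header out
 * of the first page, copy as much data as fits into the skb's linear
 * area (so small packets give their pages straight back), and attach the
 * rest as page fragments.
 */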
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct page *page, unsigned int len)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        unsigned int copy, hdr_len, offset;
        char *p;

        p = page_address(page);

        /* copy small packet so we can reuse these pages for small data */
        skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        if (vi->mergeable_rx_bufs) {
                hdr_len = sizeof hdr->mhdr;
                offset = hdr_len;
        } else {
                hdr_len = sizeof hdr->hdr;
                offset = sizeof(struct padded_vnet_hdr);
        }

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        p += offset;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        memcpy(skb_put(skb, copy), p, copy);

        len -= copy;
        offset += copy;

        while (len) {
                set_skb_frag(skb, page, offset, &len);
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(vi, page);

        return skb;
}
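
/*
 * With VIRTIO_NET_F_MRG_RXBUF a packet can span several one-page receive
 * buffers; the header's num_buffers field says how many.  Pull the extra
 * pages off the receive queue and attach them as fragments.
 */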
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        struct page *page;
        int num_buf, i;
        unsigned int len;

        num_buf = hdr->mhdr.num_buffers;
        while (--num_buf) {
                i = skb_shinfo(skb)->nr_frags;
                if (i >= MAX_SKB_FRAGS) {
                        pr_debug("%s: packet too long\n", skb->dev->name);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }

                page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                if (!page) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 skb->dev->name, hdr->mhdr.num_buffers);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;

                set_skb_frag(skb, page, 0, &len);

                --vi->num;
        }
        return 0;
}
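
/*
 * Main receive path: turn a used receive buffer into an skb, apply the
 * checksum and GSO metadata from the virtio header, and pass the packet
 * up the stack.
 */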
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct sk_buff *skb;
        struct page *page;
        struct skb_vnet_hdr *hdr;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                return;
        }

        if (!vi->mergeable_rx_bufs && !vi->big_packets) {
                skb = buf;
                len -= sizeof(struct virtio_net_hdr);
                skb_trim(skb, len);
        } else {
                page = buf;
                skb = page_to_skb(vi, page, len);
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        give_pages(vi, page);
                        return;
                }
                if (vi->mergeable_rx_bufs)
                        if (receive_mergeable(vi, skb)) {
                                dev_kfree_skb(skb);
                                return;
                        }
        }

        hdr = skb_vnet_hdr(skb);
        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}
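
/*
 * Post a single MTU-sized skb as a receive buffer: sg[0] carries the
 * virtio header (living in skb->cb), sg[1] the linear packet data.
 */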
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        struct scatterlist sg[2];
        int err;

        skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_put(skb, MAX_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

        skb_to_sgvec(skb, sg + 1, 0, skb->len);

        err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
        if (err < 0)
                dev_kfree_skb(skb);

        return err;
}
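
/*
 * Post a "big packet" receive buffer built from a chain of pages:
 *
 *   sg[0]  virtio_net_hdr \__ first page (padded_vnet_hdr layout)
 *   sg[1]  start of data  /
 *   sg[2..MAX_SKB_FRAGS+1]  one full page each
 *
 * The pages are chained through page->private so page_to_skb() can walk
 * them later.
 */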
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
        struct scatterlist sg[MAX_SKB_FRAGS + 2];
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(vi, gfp);
                if (!first) {
                        if (list)
                                give_pages(vi, list);
                        return -ENOMEM;
                }
                sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(vi, gfp);
        if (!first) {
                give_pages(vi, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* sg[0], sg[1] share the same page */
        /* a separate sg[0] for the virtio_net_hdr only, due to a QEMU bug */
        sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));

        /* sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
                                       first);
        if (err < 0)
                give_pages(vi, first);

        return err;
}
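
/*
 * Post one bare page; with mergeable rx buffers the host spreads a large
 * packet over however many of these buffers it needs.
 */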
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
        struct page *page;
        struct scatterlist sg;
        int err;

        page = get_a_page(vi, gfp);
        if (!page)
                return -ENOMEM;

        sg_init_one(&sg, page_address(page), PAGE_SIZE);

        err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
        if (err < 0)
                give_pages(vi, page);

        return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        int err;
        bool oom = false;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, gfp);
                else
                        err = add_recvbuf_small(vi, gfp);

                if (err < 0) {
                        oom = true;
                        break;
                }
                ++vi->num;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}
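
/* Receive-queue callback: defer the real work to NAPI. */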
static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;

        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        still_empty = !try_fill_recv(vi, GFP_KERNEL);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}
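
/*
 * NAPI poll: drain up to @budget used receive buffers, refill the ring
 * once it drops below half full, and only re-enable callbacks when no
 * packets are left (re-checking to close the race with the host).
 */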
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        void *buf;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                receive_buf(vi->dev, buf, len);
                --vi->num;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
                    napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}
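
/*
 * Reclaim completed transmit skbs; returns the number of sg entries
 * freed, which start_xmit() adds to its capacity estimate.
 */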
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}
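
/*
 * Fill in the virtio_net_hdr from the skb's checksum and GSO state, then
 * add the header plus the packet data to the send queue.  Returns what
 * add_buf() returns: the remaining ring capacity, or a negative errno.
 */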
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
        else
                sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}
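
/*
 * ndo_start_xmit: queue one skb for the host.  The queue is stopped
 * whenever the ring might not fit a worst-case (2 + MAX_SKB_FRAGS entry)
 * packet, rather than routinely returning TX_BUSY.
 */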
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }
        vi->svq->vq_ops->kick(vi->svq);

        /*
         * Put new one in send queue.  You'd expect we'd need this before
         * xmit_skb calls add_buf(), since the callback can be triggered
         * immediately after that.  But since the callback just triggers
         * another call back here, normal network xmit locking prevents the
         * race.
         */
        __skb_queue_head(&vi->send, skb);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
               (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}
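
/*
 * Push the promiscuous/allmulti flags and the unicast and multicast
 * address lists to the host over the control virtqueue.
 */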
static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        int uc_count;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        uc_count = netdev_uc_count(dev);
        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = uc_count;
        i = 0;
        netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[uc_count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open             = virtnet_open,
        .ndo_stop             = virtnet_close,
        .ndo_start_xmit       = start_xmit,
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_set_mac_address  = virtnet_set_mac_address,
        .ndo_set_rx_mode      = virtnet_set_rx_mode,
        .ndo_change_mtu       = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid  = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = virtnet_netpoll,
#endif
};
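
/* Re-read the link state from config space and update the carrier. */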
static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}
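
/*
 * Probe: allocate the netdev, negotiate offload features, find the
 * virtqueues, register, and prime the receive ring.
 */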
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty send queue. */
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
        void *buf;

        while (1) {
                buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
                if (!buf)
                        break;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                --vi->num;
        }
        BUG_ON(vi->num != 0);
}
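
/*
 * Teardown: reset the device first so no virtqueue callbacks can run,
 * then free everything that was still queued.
 */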
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send queue, if any. */
        __skb_queue_purge(&vi->send);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
        free_unused_bufs(vi);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtnet_probe,
        .remove = __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");