/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <net/ip.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	struct page *page;
	unsigned offset;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
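
/*
 * Per-device state for one netfront instance: the shared TX/RX front
 * rings, the event channel and grant references used to talk to the
 * backend, and the bookkeeping for outstanding skbs on both rings.
 */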
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;
	struct net_device_stats stats;

	struct xen_netif_tx_front_ring tx;
	struct xen_netif_rx_front_ring rx;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	unsigned int evtchn;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	struct xenbus_device *xbdev;
	int tx_ring_ref;
	int rx_ring_ref;

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */
static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	list[id].link = *head;
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
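
/*
 * Refill the RX ring: allocate skbs and backing pages in a batch, grant
 * the backend access to each page, and notify it if it is waiting.
 */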
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* Suitable barrier to ensure backend sees requests. */

	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	memset(&np->stats, 0, sizeof(np->stats));

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	xennet_maybe_wake_tx(dev);

	return 0;
}
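
/*
 * Reclaim TX slots whose responses the backend has published: end the
 * grant on each completed request, return the id to the freelist, free
 * the skb, then re-arm the response event.
 */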
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
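
/*
 * Queue extra TX requests for the parts of the skb that do not fit in the
 * first request: any head data that crosses page boundaries, plus every
 * page fragment. Each chunk gets its own id, grant reference and request.
 */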
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}
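
/*
 * Transmit entry point: map the skb head and fragments into TX ring
 * requests backed by grant references, emit a GSO extra-info request if
 * needed, then push the requests and kick the backend.
 */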
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

 drop:
	np->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static struct net_device_stats *xennet_get_stats(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	return &np->stats;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
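
/*
 * Consume the chain of RX responses that make up one packet, ending the
 * grant on each fragment and queueing the corresponding skbs on 'list'.
 * Bad responses are returned to the ring via xennet_move_rx_slot().
 */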
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
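
/*
 * Attach the extra response skbs gathered by xennet_get_responses() to
 * 'skb' as page fragments, advancing and returning the RX consumer index.
 */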
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}
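
/*
 * For packets flagged CHECKSUM_PARTIAL by the backend, locate the TCP/UDP
 * header and fill in csum_start/csum_offset so the checksum can be
 * completed later.
 */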
static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
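
/*
 * Final per-packet RX work: copy the head data out of the backing page,
 * set the protocol and checksum state, and hand the skb to the stack.
 * Returns the number of packets that had to be dropped.
 */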
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				np->stats.rx_errors++;
				continue;
			}
		}

		np->stats.rx_packets++;
		np->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	return packets_dropped;
}
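
/*
 * NAPI poll handler: drain up to 'budget' responses from the RX ring,
 * reassemble multi-fragment packets, deliver them via
 * handle_incoming_queue() and refill the ring before completing.
 */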
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		spin_unlock(&np->rx_lock);
		return 0;
	}

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			np->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	while ((skb = __skb_dequeue(&errq)))
		kfree_skb(skb);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 0, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	while ((skb = __skb_dequeue(&free_list)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i].link = i+1;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop            = xennet_close;
	netdev->get_stats       = xennet_get_stats;
	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->uninit          = xennet_uninit;
	netdev->change_mtu      = xennet_change_mtu;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
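
/*
 * Event-channel interrupt handler: reap completed TX slots and, if RX
 * responses are pending, schedule the NAPI poll routine.
 */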
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}
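
/*
 * Allocate and grant the shared TX/RX ring pages, read the MAC address
 * from xenstore, and bind the event channel to our interrupt handler.
 */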
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}

static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}
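
/*
 * (Re)connect to the backend: check for RX-copy support over xenstore,
 * rebuild the RX ring from any outstanding buffers, then bring the
 * carrier up and kick both rings.
 */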
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
					       frags->page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct ethtool_ops xennet_ethtool_ops =
{
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = xennet_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}

static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (is_initial_xendomain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	if (is_initial_xendomain())
		return;

	return xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");