xen-netfront.c

/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF 0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
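
/*
 * NET_{TX,RX}_RING_SIZE is the number of request/response slots that fit in
 * a single shared ring page, as computed by __CONST_RING_SIZE(); with the
 * usual 4 KiB PAGE_SIZE both work out to 256 slots.  TX_MAX_TARGET therefore
 * caps the number of in-flight transmit slots at no more than one ring's
 * worth.
 */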

struct netfront_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	unsigned int evtchn;
	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
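
/*
 * Example of the freelist encoding above: after add_id_to_freelist(&head,
 * list, 5) and then add_id_to_freelist(&head, list, 9), head == 9 and
 * list[9].link == 5, so get_id_from_freelist() hands the ids back in LIFO
 * order (9, then 5).  Because ids are small integers they compare below
 * PAGE_OFFSET, which is how skb_entry_is_link() tells a free slot from one
 * holding a real skb pointer.
 */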

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
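
/*
 * The headroom above is roughly enough for one worst-case packet: up to
 * MAX_SKB_FRAGS fragment slots plus a couple of extra slots (linear header
 * and a GSO extra-info descriptor), so the queue is only woken when the
 * next packet is guaranteed to fit in the ring.
 */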

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		__skb_fill_page_desc(skb, 0, page, 0, 0);
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&np->tx_skb_freelist,
						  np->tx_skbs);
			np->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&np->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			tx->gref = np->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	np->tx.req_prod_pvt = prod;
}

/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}
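
/*
 * Worked example of the slot count above, assuming 4 KiB pages: a frag of
 * 6000 bytes starting 1000 bytes into its page spans bytes 1000..6999, so
 * PFN_UP(1000 + 6000) = PFN_UP(7000) = 2 pages, i.e. two ring slots.  Only
 * the in-page offset (offset & ~PAGE_MASK) matters; whole leading pages of
 * a compound page are skipped.
 */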

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
		skb = xennet_get_rx_skb(np, cons + slots);
		ref = xennet_get_rx_ref(np, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		__skb_fill_page_desc(skb, nr_frags,
				     skb_frag_page(nfrag),
				     rx->offset, rx->status);

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
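
/*
 * Note on the CHECKSUM_PARTIAL fixup above: csum_start/csum_offset tell the
 * stack where the transport checksum field lives relative to skb->head, and
 * when a buggy backend failed to mark a GSO frame csum_blank we reseed that
 * field from the IP pseudo-header (csum_tcpudp_magic over the addresses,
 * length and protocol), so later checksum completion only has to add in the
 * TCP/UDP payload sum.
 */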

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize is the actual allocation size, even if the
		 * allocation is only partially used.
		 */
		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
		skb->len += skb->data_len;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
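
/*
 * With scatter-gather negotiated the frontend can accept frames up to the
 * 64 KiB IP datagram limit, hence the 65535 - ETH_HLEN cap above; without
 * SG the MTU falls back to the standard ETH_DATA_LEN of 1500 bytes.
 */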

static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			const struct page *page =
				skb_frag_page(&skb_shinfo(skb)->frags[0]);
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	return features;
}

static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = alloc_percpu(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops = &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
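
/*
 * xen_net_read_mac() expects the "mac" node written by the toolstack to hold
 * the usual colon-separated form, e.g. "00:16:3e:5e:6c:00"; each of the six
 * hex octets is parsed in turn and any other layout yields -ENOENT.
 */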

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					0, netdev->name, netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
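
/*
 * In xenstore terms the handshake above leaves the frontend's device node
 * populated with tx-ring-ref, rx-ring-ref, event-channel, request-rx-copy
 * and the feature-* keys, all written in a single transaction so the backend
 * sees either the complete set or none of it (-EAGAIN simply retries the
 * transaction).
 */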
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		skb_frag_t *frag;
		const struct page *page;

		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(page)),
			0);
		req->gref = ref;
		req->id = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}
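
/*
 * Rough connection sequence as driven by netback_changed() below; this is
 * an illustrative summary of the switch statement, not an exhaustive
 * description of the xenbus state machine:
 *
 *   backend -> XenbusStateInitWait
 *       frontend: xennet_connect() checks feature-rx-copy, (re)builds the
 *       rings via talk_to_netback(), then switches itself to
 *       XenbusStateConnected
 *   backend -> XenbusStateConnected
 *       frontend: netdev_notify_peers() announces the (re)appeared link
 *   backend -> XenbusStateClosing
 *       frontend: xenbus_frontend_closed()
 */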
/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
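
/*
 * Usage sketch for the ethtool hooks above: "ethtool -S" on the frontend
 * interface walks xennet_get_sset_count(), xennet_get_strings() and
 * xennet_get_ethtool_stats(), so the single driver-private counter shows
 * up roughly as (interface name and value are examples only):
 *
 *   # ethtool -S eth0
 *   NIC statistics:
 *        rx_gso_checksum_fixup: 0
 */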
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};
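
/*
 * Illustrative sketch of how the attributes above are used once
 * xennet_sysfs_addif() below has registered them on the netdev's device
 * (the interface name is an example):
 *
 *   # cat /sys/class/net/eth0/rxbuf_cur
 *   <current rx_target>
 *   # echo 128 > /sys/class/net/eth0/rxbuf_min
 *
 * Writes are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] and require
 * CAP_NET_ADMIN; rxbuf_cur is read-only.
 */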
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
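
/*
 * Teardown order in xennet_remove() below: disconnect from the backend
 * first so rings, grants and the event channel are released, drop the
 * sysfs attributes, unregister the netdev, make sure the deferred RX
 * refill timer can no longer fire, and only then free the per-CPU stats
 * and the netdev itself.
 */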
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	del_timer_sync(&info->rx_refill_timer);

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}
static DEFINE_XENBUS_DRIVER(netfront, ,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
);
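
/*
 * Informal summary of the two early checks in netif_init() below: the
 * driver only loads inside a Xen guest, and for PV-on-HVM guests the
 * emulated NICs are expected to have been unplugged (as indicated by
 * xen_platform_pci_unplug) before the PV frontend takes over, so the same
 * interface is not driven twice.
 */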
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");