netback.c

  1. /*
  2. * Back-end of the driver for virtual network devices. This portion of the
  3. * driver exports a 'unified' network-device interface that can be accessed
  4. * by any operating system that implements a compatible front end. A
  5. * reference front-end implementation can be found in:
  6. * drivers/net/xen-netfront.c
  7. *
  8. * Copyright (c) 2002-2005, K A Fraser
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include "common.h"
  35. #include <linux/kthread.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/udp.h>
  38. #include <net/tcp.h>
  39. #include <xen/xen.h>
  40. #include <xen/events.h>
  41. #include <xen/interface/memory.h>
  42. #include <asm/xen/hypercall.h>
  43. #include <asm/xen/page.h>
  44. /* Provide an option to disable split event channels at load time as
  45. * event channels are a limited resource. Split event channels are
  46. * enabled by default.
  47. */
  48. bool separate_tx_rx_irq = 1;
  49. module_param(separate_tx_rx_irq, bool, 0644);
  50. /*
  51. * This is the maximum number of slots an skb can have. If a guest
  52. * sends an skb which exceeds this limit, it is considered malicious.
  53. */
  54. #define FATAL_SKB_SLOTS_DEFAULT 20
  55. static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  56. module_param(fatal_skb_slots, uint, 0444);
  57. /*
  58. * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
  59. * the maximum number of slots a valid packet can use. This value is
  60. * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
  61. * supported by all backends.
  62. */
  63. #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
  64. /*
  65. * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
  66. * one or more merged tx requests, otherwise it is the continuation of
  67. * previous tx request.
  68. */
  69. static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
  70. {
  71. return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
  72. }
  73. static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
  74. u8 status);
  75. static void make_tx_response(struct xenvif *vif,
  76. struct xen_netif_tx_request *txp,
  77. s8 st);
  78. static inline int tx_work_todo(struct xenvif *vif);
  79. static inline int rx_work_todo(struct xenvif *vif);
  80. static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
  81. u16 id,
  82. s8 st,
  83. u16 offset,
  84. u16 size,
  85. u16 flags);
  86. static inline unsigned long idx_to_pfn(struct xenvif *vif,
  87. u16 idx)
  88. {
  89. return page_to_pfn(vif->mmap_pages[idx]);
  90. }
  91. static inline unsigned long idx_to_kaddr(struct xenvif *vif,
  92. u16 idx)
  93. {
  94. return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
  95. }
  96. /*
  97. * This is the amount of the packet we copy rather than map, so that the
  98. * guest can't fiddle with the contents of the headers while we do
  99. * packet processing on them (netfilter, routing, etc).
  100. */
  101. #define PKT_PROT_LEN (ETH_HLEN + \
  102. VLAN_HLEN + \
  103. sizeof(struct iphdr) + MAX_IPOPTLEN + \
  104. sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
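/* Illustrative arithmetic, not part of the original source: with the usual
 * definitions (ETH_HLEN = 14, VLAN_HLEN = 4, sizeof(struct iphdr) = 20,
 * MAX_IPOPTLEN = 40, sizeof(struct tcphdr) = 20, MAX_TCP_OPTION_SPACE = 40)
 * PKT_PROT_LEN works out to 138 bytes, i.e. enough linear data to cover the
 * largest plausible Ethernet/VLAN/IPv4/TCP header stack.
 */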
  105. static u16 frag_get_pending_idx(skb_frag_t *frag)
  106. {
  107. return (u16)frag->page_offset;
  108. }
  109. static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
  110. {
  111. frag->page_offset = pending_idx;
  112. }
  113. static inline pending_ring_idx_t pending_index(unsigned i)
  114. {
  115. return i & (MAX_PENDING_REQS-1);
  116. }
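/* Annotation, not part of the original source: the mask in pending_index()
 * implements "i % MAX_PENDING_REQS" and therefore relies on MAX_PENDING_REQS
 * being a power of two; e.g. with MAX_PENDING_REQS = 256,
 * pending_index(257) == 1.
 */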
  117. static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
  118. {
  119. return MAX_PENDING_REQS -
  120. vif->pending_prod + vif->pending_cons;
  121. }
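/* Annotation, not part of the original source: pending_prod - pending_cons
 * is the number of free entries in the pending ring, so nr_pending_reqs()
 * is the number of pending slots currently occupied by in-flight tx
 * requests.
 */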
  122. static int max_required_rx_slots(struct xenvif *vif)
  123. {
  124. int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
  125. /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
  126. if (vif->can_sg || vif->gso || vif->gso_prefix)
  127. max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
  128. return max;
  129. }
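/* Annotation, not part of the original source: with a typical 1500-byte MTU
 * and 4 KiB pages this evaluates to 1 slot for the linear data, plus
 * MAX_SKB_FRAGS + 1 further slots (frags plus one extra_info slot) whenever
 * scatter-gather or GSO is enabled on the vif.
 */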
  130. int xen_netbk_rx_ring_full(struct xenvif *vif)
  131. {
  132. RING_IDX peek = vif->rx_req_cons_peek;
  133. RING_IDX needed = max_required_rx_slots(vif);
  134. return ((vif->rx.sring->req_prod - peek) < needed) ||
  135. ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
  136. }
  137. int xen_netbk_must_stop_queue(struct xenvif *vif)
  138. {
  139. if (!xen_netbk_rx_ring_full(vif))
  140. return 0;
  141. vif->rx.sring->req_event = vif->rx_req_cons_peek +
  142. max_required_rx_slots(vif);
  143. mb(); /* request notification /then/ check the queue */
  144. return xen_netbk_rx_ring_full(vif);
  145. }
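/* Annotation, not part of the original source: re-checking
 * xen_netbk_rx_ring_full() after writing req_event closes the race where the
 * frontend posts new rx requests between the first check and the request for
 * a notification; without the re-check the queue could stall waiting for an
 * event that never arrives.
 */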
  146. /*
  147. * Returns true if we should start a new receive buffer instead of
  148. * adding 'size' bytes to a buffer which currently contains 'offset'
  149. * bytes.
  150. */
  151. static bool start_new_rx_buffer(int offset, unsigned long size, int head)
  152. {
  153. /* simple case: we have completely filled the current buffer. */
  154. if (offset == MAX_BUFFER_OFFSET)
  155. return true;
  156. /*
  157. * complex case: start a fresh buffer if the current frag
  158. * would overflow the current buffer but only if:
  159. * (i) this frag would fit completely in the next buffer
  160. * and (ii) there is already some data in the current buffer
  161. * and (iii) this is not the head buffer.
  162. *
  163. * Where:
  164. * - (i) stops us splitting a frag into two copies
  165. * unless the frag is too large for a single buffer.
  166. * - (ii) stops us from leaving a buffer pointlessly empty.
  167. * - (iii) stops us leaving the first buffer
  168. * empty. Strictly speaking this is already covered
  169. * by (ii) but is explicitly checked because
  170. * netfront relies on the first buffer being
  171. * non-empty and can crash otherwise.
  172. *
  173. * This means we will effectively linearise small
  174. * frags, but will not needlessly split large buffers
  175. * into multiple copies, tending to give large frags
  176. * their own buffers as before.
  177. */
  178. if ((offset + size > MAX_BUFFER_OFFSET) &&
  179. (size <= MAX_BUFFER_OFFSET) && offset && !head)
  180. return true;
  181. return false;
  182. }
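/* Worked example, not part of the original source (assuming MAX_BUFFER_OFFSET
 * is one page): with offset = 3000 and size = 2000 the frag would spill past
 * the end of the current buffer, it would fit entirely in a fresh buffer, the
 * current buffer is non-empty and it is not the head, so the function returns
 * true and the whole frag goes into the next buffer instead of being split.
 */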
  183. /*
  184. * Figure out how many ring slots we're going to need to send @skb to
  185. * the guest. This function is essentially a dry run of
  186. * netbk_gop_frag_copy.
  187. */
  188. unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
  189. {
  190. unsigned int count;
  191. int i, copy_off;
  192. count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
  193. copy_off = skb_headlen(skb) % PAGE_SIZE;
  194. if (skb_shinfo(skb)->gso_size)
  195. count++;
  196. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  197. unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  198. unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
  199. unsigned long bytes;
  200. offset &= ~PAGE_MASK;
  201. while (size > 0) {
  202. BUG_ON(offset >= PAGE_SIZE);
  203. BUG_ON(copy_off > MAX_BUFFER_OFFSET);
  204. bytes = PAGE_SIZE - offset;
  205. if (bytes > size)
  206. bytes = size;
  207. if (start_new_rx_buffer(copy_off, bytes, 0)) {
  208. count++;
  209. copy_off = 0;
  210. }
  211. if (copy_off + bytes > MAX_BUFFER_OFFSET)
  212. bytes = MAX_BUFFER_OFFSET - copy_off;
  213. copy_off += bytes;
  214. offset += bytes;
  215. size -= bytes;
  216. if (offset == PAGE_SIZE)
  217. offset = 0;
  218. }
  219. }
  220. return count;
  221. }
  222. struct netrx_pending_operations {
  223. unsigned copy_prod, copy_cons;
  224. unsigned meta_prod, meta_cons;
  225. struct gnttab_copy *copy;
  226. struct xenvif_rx_meta *meta;
  227. int copy_off;
  228. grant_ref_t copy_gref;
  229. };
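/* Annotation, not part of the original source: copy_prod/copy_cons and
 * meta_prod/meta_cons are produce/consume counters into the copy[] and meta[]
 * arrays, while copy_off and copy_gref track how far the current guest rx
 * buffer has been filled and which grant reference it belongs to.
 */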
  230. static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  231. struct netrx_pending_operations *npo)
  232. {
  233. struct xenvif_rx_meta *meta;
  234. struct xen_netif_rx_request *req;
  235. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  236. meta = npo->meta + npo->meta_prod++;
  237. meta->gso_size = 0;
  238. meta->size = 0;
  239. meta->id = req->id;
  240. npo->copy_off = 0;
  241. npo->copy_gref = req->gref;
  242. return meta;
  243. }
  244. /*
  245. * Set up the grant operations for this fragment. If it's a flipping
  246. * interface, we also set up the unmap request from here.
  247. */
  248. static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  249. struct netrx_pending_operations *npo,
  250. struct page *page, unsigned long size,
  251. unsigned long offset, int *head)
  252. {
  253. struct gnttab_copy *copy_gop;
  254. struct xenvif_rx_meta *meta;
  255. unsigned long bytes;
  256. /* Data must not cross a page boundary. */
  257. BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
  258. meta = npo->meta + npo->meta_prod - 1;
  259. /* Skip unused frames from start of page */
  260. page += offset >> PAGE_SHIFT;
  261. offset &= ~PAGE_MASK;
  262. while (size > 0) {
  263. BUG_ON(offset >= PAGE_SIZE);
  264. BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
  265. bytes = PAGE_SIZE - offset;
  266. if (bytes > size)
  267. bytes = size;
  268. if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
  269. /*
  270. * Netfront requires there to be some data in the head
  271. * buffer.
  272. */
  273. BUG_ON(*head);
  274. meta = get_next_rx_buffer(vif, npo);
  275. }
  276. if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
  277. bytes = MAX_BUFFER_OFFSET - npo->copy_off;
  278. copy_gop = npo->copy + npo->copy_prod++;
  279. copy_gop->flags = GNTCOPY_dest_gref;
  280. copy_gop->len = bytes;
  281. copy_gop->source.domid = DOMID_SELF;
  282. copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
  283. copy_gop->source.offset = offset;
  284. copy_gop->dest.domid = vif->domid;
  285. copy_gop->dest.offset = npo->copy_off;
  286. copy_gop->dest.u.ref = npo->copy_gref;
  287. npo->copy_off += bytes;
  288. meta->size += bytes;
  289. offset += bytes;
  290. size -= bytes;
  291. /* Next frame */
  292. if (offset == PAGE_SIZE && size) {
  293. BUG_ON(!PageCompound(page));
  294. page++;
  295. offset = 0;
  296. }
  297. /* Leave a gap for the GSO descriptor. */
  298. if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
  299. vif->rx.req_cons++;
  300. *head = 0; /* There must be something in this buffer now. */
  301. }
  302. }
  303. /*
  304. * Prepare an SKB to be transmitted to the frontend.
  305. *
  306. * This function is responsible for allocating grant operations, meta
  307. * structures, etc.
  308. *
  309. * It returns the number of meta structures consumed. The number of
  310. * ring slots used is always equal to the number of meta slots used
  311. * plus the number of GSO descriptors used. Currently, we use either
  312. * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  313. * frontend-side LRO).
  314. */
  315. static int netbk_gop_skb(struct sk_buff *skb,
  316. struct netrx_pending_operations *npo)
  317. {
  318. struct xenvif *vif = netdev_priv(skb->dev);
  319. int nr_frags = skb_shinfo(skb)->nr_frags;
  320. int i;
  321. struct xen_netif_rx_request *req;
  322. struct xenvif_rx_meta *meta;
  323. unsigned char *data;
  324. int head = 1;
  325. int old_meta_prod;
  326. old_meta_prod = npo->meta_prod;
  327. /* Set up a GSO prefix descriptor, if necessary */
  328. if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
  329. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  330. meta = npo->meta + npo->meta_prod++;
  331. meta->gso_size = skb_shinfo(skb)->gso_size;
  332. meta->size = 0;
  333. meta->id = req->id;
  334. }
  335. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  336. meta = npo->meta + npo->meta_prod++;
  337. if (!vif->gso_prefix)
  338. meta->gso_size = skb_shinfo(skb)->gso_size;
  339. else
  340. meta->gso_size = 0;
  341. meta->size = 0;
  342. meta->id = req->id;
  343. npo->copy_off = 0;
  344. npo->copy_gref = req->gref;
  345. data = skb->data;
  346. while (data < skb_tail_pointer(skb)) {
  347. unsigned int offset = offset_in_page(data);
  348. unsigned int len = PAGE_SIZE - offset;
  349. if (data + len > skb_tail_pointer(skb))
  350. len = skb_tail_pointer(skb) - data;
  351. netbk_gop_frag_copy(vif, skb, npo,
  352. virt_to_page(data), len, offset, &head);
  353. data += len;
  354. }
  355. for (i = 0; i < nr_frags; i++) {
  356. netbk_gop_frag_copy(vif, skb, npo,
  357. skb_frag_page(&skb_shinfo(skb)->frags[i]),
  358. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  359. skb_shinfo(skb)->frags[i].page_offset,
  360. &head);
  361. }
  362. return npo->meta_prod - old_meta_prod;
  363. }
  364. /*
  365. * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
  366. * used to set up the operations on the top of
  367. * netrx_pending_operations, which have since been done. Check that
  368. * they didn't give any errors and advance over them.
  369. */
  370. static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
  371. struct netrx_pending_operations *npo)
  372. {
  373. struct gnttab_copy *copy_op;
  374. int status = XEN_NETIF_RSP_OKAY;
  375. int i;
  376. for (i = 0; i < nr_meta_slots; i++) {
  377. copy_op = npo->copy + npo->copy_cons++;
  378. if (copy_op->status != GNTST_okay) {
  379. netdev_dbg(vif->dev,
  380. "Bad status %d from copy to DOM%d.\n",
  381. copy_op->status, vif->domid);
  382. status = XEN_NETIF_RSP_ERROR;
  383. }
  384. }
  385. return status;
  386. }
  387. static void netbk_add_frag_responses(struct xenvif *vif, int status,
  388. struct xenvif_rx_meta *meta,
  389. int nr_meta_slots)
  390. {
  391. int i;
  392. unsigned long offset;
  393. /* No fragments used */
  394. if (nr_meta_slots <= 1)
  395. return;
  396. nr_meta_slots--;
  397. for (i = 0; i < nr_meta_slots; i++) {
  398. int flags;
  399. if (i == nr_meta_slots - 1)
  400. flags = 0;
  401. else
  402. flags = XEN_NETRXF_more_data;
  403. offset = 0;
  404. make_rx_response(vif, meta[i].id, status, offset,
  405. meta[i].size, flags);
  406. }
  407. }
  408. struct skb_cb_overlay {
  409. int meta_slots_used;
  410. };
  411. static void xen_netbk_kick_thread(struct xenvif *vif)
  412. {
  413. wake_up(&vif->wq);
  414. }
  415. void xen_netbk_rx_action(struct xenvif *vif)
  416. {
  417. s8 status;
  418. u16 flags;
  419. struct xen_netif_rx_response *resp;
  420. struct sk_buff_head rxq;
  421. struct sk_buff *skb;
  422. LIST_HEAD(notify);
  423. int ret;
  424. int nr_frags;
  425. int count;
  426. unsigned long offset;
  427. struct skb_cb_overlay *sco;
  428. int need_to_notify = 0;
  429. struct netrx_pending_operations npo = {
  430. .copy = vif->grant_copy_op,
  431. .meta = vif->meta,
  432. };
  433. skb_queue_head_init(&rxq);
  434. count = 0;
  435. while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
  436. vif = netdev_priv(skb->dev);
  437. nr_frags = skb_shinfo(skb)->nr_frags;
  438. sco = (struct skb_cb_overlay *)skb->cb;
  439. sco->meta_slots_used = netbk_gop_skb(skb, &npo);
  440. count += nr_frags + 1;
  441. __skb_queue_tail(&rxq, skb);
  442. /* Filled the batch queue? */
  443. /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
  444. if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
  445. break;
  446. }
  447. BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
  448. if (!npo.copy_prod)
  449. return;
  450. BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
  451. gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
  452. while ((skb = __skb_dequeue(&rxq)) != NULL) {
  453. sco = (struct skb_cb_overlay *)skb->cb;
  454. vif = netdev_priv(skb->dev);
  455. if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
  456. resp = RING_GET_RESPONSE(&vif->rx,
  457. vif->rx.rsp_prod_pvt++);
  458. resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
  459. resp->offset = vif->meta[npo.meta_cons].gso_size;
  460. resp->id = vif->meta[npo.meta_cons].id;
  461. resp->status = sco->meta_slots_used;
  462. npo.meta_cons++;
  463. sco->meta_slots_used--;
  464. }
  465. vif->dev->stats.tx_bytes += skb->len;
  466. vif->dev->stats.tx_packets++;
  467. status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
  468. if (sco->meta_slots_used == 1)
  469. flags = 0;
  470. else
  471. flags = XEN_NETRXF_more_data;
  472. if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
  473. flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
  474. else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  475. /* remote but checksummed. */
  476. flags |= XEN_NETRXF_data_validated;
  477. offset = 0;
  478. resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
  479. status, offset,
  480. vif->meta[npo.meta_cons].size,
  481. flags);
  482. if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
  483. struct xen_netif_extra_info *gso =
  484. (struct xen_netif_extra_info *)
  485. RING_GET_RESPONSE(&vif->rx,
  486. vif->rx.rsp_prod_pvt++);
  487. resp->flags |= XEN_NETRXF_extra_info;
  488. gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
  489. gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
  490. gso->u.gso.pad = 0;
  491. gso->u.gso.features = 0;
  492. gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
  493. gso->flags = 0;
  494. }
  495. netbk_add_frag_responses(vif, status,
  496. vif->meta + npo.meta_cons + 1,
  497. sco->meta_slots_used);
  498. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
  499. if (ret)
  500. need_to_notify = 1;
  501. xenvif_notify_tx_completion(vif);
  502. npo.meta_cons += sco->meta_slots_used;
  503. dev_kfree_skb(skb);
  504. }
  505. if (need_to_notify)
  506. notify_remote_via_irq(vif->rx_irq);
  507. /* More work to do? */
  508. if (!skb_queue_empty(&vif->rx_queue))
  509. xen_netbk_kick_thread(vif);
  510. }
  511. void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
  512. {
  513. skb_queue_tail(&vif->rx_queue, skb);
  514. xen_netbk_kick_thread(vif);
  515. }
  516. void xen_netbk_check_rx_xenvif(struct xenvif *vif)
  517. {
  518. int more_to_do;
  519. RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
  520. if (more_to_do)
  521. napi_schedule(&vif->napi);
  522. }
  523. static void tx_add_credit(struct xenvif *vif)
  524. {
  525. unsigned long max_burst, max_credit;
  526. /*
  527. * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
  528. * Otherwise the interface can seize up due to insufficient credit.
  529. */
  530. max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
  531. max_burst = min(max_burst, 131072UL);
  532. max_burst = max(max_burst, vif->credit_bytes);
  533. /* Take care that adding a new chunk of credit doesn't wrap to zero. */
  534. max_credit = vif->remaining_credit + vif->credit_bytes;
  535. if (max_credit < vif->remaining_credit)
  536. max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
  537. vif->remaining_credit = min(max_credit, max_burst);
  538. }
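/* Annotation, not part of the original source: each replenish adds
 * credit_bytes to the remaining credit and then caps the result at max_burst,
 * which is normally credit_bytes but is raised (up to 128 kB) to the size of
 * the next pending request, so a single packet larger than credit_bytes can
 * still eventually be sent.
 */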
  539. static void tx_credit_callback(unsigned long data)
  540. {
  541. struct xenvif *vif = (struct xenvif *)data;
  542. tx_add_credit(vif);
  543. xen_netbk_check_rx_xenvif(vif);
  544. }
  545. static void netbk_tx_err(struct xenvif *vif,
  546. struct xen_netif_tx_request *txp, RING_IDX end)
  547. {
  548. RING_IDX cons = vif->tx.req_cons;
  549. do {
  550. make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
  551. if (cons == end)
  552. break;
  553. txp = RING_GET_REQUEST(&vif->tx, cons++);
  554. } while (1);
  555. vif->tx.req_cons = cons;
  556. }
  557. static void netbk_fatal_tx_err(struct xenvif *vif)
  558. {
  559. netdev_err(vif->dev, "fatal error; disabling device\n");
  560. xenvif_carrier_off(vif);
  561. }
  562. static int netbk_count_requests(struct xenvif *vif,
  563. struct xen_netif_tx_request *first,
  564. struct xen_netif_tx_request *txp,
  565. int work_to_do)
  566. {
  567. RING_IDX cons = vif->tx.req_cons;
  568. int slots = 0;
  569. int drop_err = 0;
  570. int more_data;
  571. if (!(first->flags & XEN_NETTXF_more_data))
  572. return 0;
  573. do {
  574. struct xen_netif_tx_request dropped_tx = { 0 };
  575. if (slots >= work_to_do) {
  576. netdev_err(vif->dev,
  577. "Asked for %d slots but exceeds this limit\n",
  578. work_to_do);
  579. netbk_fatal_tx_err(vif);
  580. return -ENODATA;
  581. }
  582. /* This guest is really using too many slots and is
  583. * considered malicious.
  584. */
  585. if (unlikely(slots >= fatal_skb_slots)) {
  586. netdev_err(vif->dev,
  587. "Malicious frontend using %d slots, threshold %u\n",
  588. slots, fatal_skb_slots);
  589. netbk_fatal_tx_err(vif);
  590. return -E2BIG;
  591. }
  592. /* The Xen network protocol had an implicit dependency on
  593. * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  594. * the historical MAX_SKB_FRAGS value 18 to honor the
  595. * same behavior as before. Any packet using more than
  596. * 18 slots but fewer than fatal_skb_slots slots is
  597. * dropped.
  598. */
  599. if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
  600. if (net_ratelimit())
  601. netdev_dbg(vif->dev,
  602. "Too many slots (%d) exceeding limit (%d), dropping packet\n",
  603. slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  604. drop_err = -E2BIG;
  605. }
  606. if (drop_err)
  607. txp = &dropped_tx;
  608. memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
  609. sizeof(*txp));
  610. /* If the guest submitted a frame >= 64 KiB then
  611. * first->size overflowed and following slots will
  612. * appear to be larger than the frame.
  613. *
  614. * This cannot be a fatal error as there are buggy
  615. * frontends that do this.
  616. *
  617. * Consume all slots and drop the packet.
  618. */
  619. if (!drop_err && txp->size > first->size) {
  620. if (net_ratelimit())
  621. netdev_dbg(vif->dev,
  622. "Invalid tx request, slot size %u > remaining size %u\n",
  623. txp->size, first->size);
  624. drop_err = -EIO;
  625. }
  626. first->size -= txp->size;
  627. slots++;
  628. if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
  629. netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
  630. txp->offset, txp->size);
  631. netbk_fatal_tx_err(vif);
  632. return -EINVAL;
  633. }
  634. more_data = txp->flags & XEN_NETTXF_more_data;
  635. if (!drop_err)
  636. txp++;
  637. } while (more_data);
  638. if (drop_err) {
  639. netbk_tx_err(vif, first, cons + slots);
  640. return drop_err;
  641. }
  642. return slots;
  643. }
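/* Annotation, not part of the original source: netbk_count_requests() returns
 * the number of follow-on slots on success, a negative "drop" error (-E2BIG
 * or -EIO) after consuming the offending slots and queueing error responses,
 * or a negative fatal error after netbk_fatal_tx_err() has taken the vif
 * down.
 */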
  644. static struct page *xen_netbk_alloc_page(struct xenvif *vif,
  645. u16 pending_idx)
  646. {
  647. struct page *page;
  648. page = alloc_page(GFP_ATOMIC|__GFP_COLD);
  649. if (!page)
  650. return NULL;
  651. vif->mmap_pages[pending_idx] = page;
  652. return page;
  653. }
  654. static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
  655. struct sk_buff *skb,
  656. struct xen_netif_tx_request *txp,
  657. struct gnttab_copy *gop)
  658. {
  659. struct skb_shared_info *shinfo = skb_shinfo(skb);
  660. skb_frag_t *frags = shinfo->frags;
  661. u16 pending_idx = *((u16 *)skb->data);
  662. u16 head_idx = 0;
  663. int slot, start;
  664. struct page *page;
  665. pending_ring_idx_t index, start_idx = 0;
  666. uint16_t dst_offset;
  667. unsigned int nr_slots;
  668. struct pending_tx_info *first = NULL;
  669. /* At this point shinfo->nr_frags is in fact the number of
  670. * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
  671. */
  672. nr_slots = shinfo->nr_frags;
  673. /* Skip first skb fragment if it is on same page as header fragment. */
  674. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  675. /* Coalesce tx requests; at this point the packet passed in
  676. * should be <= 64K. Any packets larger than 64K have been
  677. * handled in netbk_count_requests().
  678. */
  679. for (shinfo->nr_frags = slot = start; slot < nr_slots;
  680. shinfo->nr_frags++) {
  681. struct pending_tx_info *pending_tx_info =
  682. vif->pending_tx_info;
  683. page = alloc_page(GFP_ATOMIC|__GFP_COLD);
  684. if (!page)
  685. goto err;
  686. dst_offset = 0;
  687. first = NULL;
  688. while (dst_offset < PAGE_SIZE && slot < nr_slots) {
  689. gop->flags = GNTCOPY_source_gref;
  690. gop->source.u.ref = txp->gref;
  691. gop->source.domid = vif->domid;
  692. gop->source.offset = txp->offset;
  693. gop->dest.domid = DOMID_SELF;
  694. gop->dest.offset = dst_offset;
  695. gop->dest.u.gmfn = virt_to_mfn(page_address(page));
  696. if (dst_offset + txp->size > PAGE_SIZE) {
  697. /* This page can only merge a portion
  698. * of the tx request. Do not increment any
  699. * pointer / counter here. The txp
  700. * will be dealt with in future
  701. * rounds, eventually hitting the
  702. * `else` branch.
  703. */
  704. gop->len = PAGE_SIZE - dst_offset;
  705. txp->offset += gop->len;
  706. txp->size -= gop->len;
  707. dst_offset += gop->len; /* quit loop */
  708. } else {
  709. /* This tx request can be merged in the page */
  710. gop->len = txp->size;
  711. dst_offset += gop->len;
  712. index = pending_index(vif->pending_cons++);
  713. pending_idx = vif->pending_ring[index];
  714. memcpy(&pending_tx_info[pending_idx].req, txp,
  715. sizeof(*txp));
  716. /* Poison these fields; the corresponding
  717. * fields for the head tx req will be set
  718. * to correct values after the loop.
  719. */
  720. vif->mmap_pages[pending_idx] = (void *)(~0UL);
  721. pending_tx_info[pending_idx].head =
  722. INVALID_PENDING_RING_IDX;
  723. if (!first) {
  724. first = &pending_tx_info[pending_idx];
  725. start_idx = index;
  726. head_idx = pending_idx;
  727. }
  728. txp++;
  729. slot++;
  730. }
  731. gop++;
  732. }
  733. first->req.offset = 0;
  734. first->req.size = dst_offset;
  735. first->head = start_idx;
  736. vif->mmap_pages[head_idx] = page;
  737. frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
  738. }
  739. BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
  740. return gop;
  741. err:
  742. /* Unwind, freeing all pages and sending error responses. */
  743. while (shinfo->nr_frags-- > start) {
  744. xen_netbk_idx_release(vif,
  745. frag_get_pending_idx(&frags[shinfo->nr_frags]),
  746. XEN_NETIF_RSP_ERROR);
  747. }
  748. /* The head too, if necessary. */
  749. if (start)
  750. xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
  751. return NULL;
  752. }
  753. static int xen_netbk_tx_check_gop(struct xenvif *vif,
  754. struct sk_buff *skb,
  755. struct gnttab_copy **gopp)
  756. {
  757. struct gnttab_copy *gop = *gopp;
  758. u16 pending_idx = *((u16 *)skb->data);
  759. struct skb_shared_info *shinfo = skb_shinfo(skb);
  760. struct pending_tx_info *tx_info;
  761. int nr_frags = shinfo->nr_frags;
  762. int i, err, start;
  763. u16 peek; /* peek into next tx request */
  764. /* Check status of header. */
  765. err = gop->status;
  766. if (unlikely(err))
  767. xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
  768. /* Skip first skb fragment if it is on same page as header fragment. */
  769. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  770. for (i = start; i < nr_frags; i++) {
  771. int j, newerr;
  772. pending_ring_idx_t head;
  773. pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
  774. tx_info = &vif->pending_tx_info[pending_idx];
  775. head = tx_info->head;
  776. /* Check error status: if okay then remember grant handle. */
  777. do {
  778. newerr = (++gop)->status;
  779. if (newerr)
  780. break;
  781. peek = vif->pending_ring[pending_index(++head)];
  782. } while (!pending_tx_is_head(vif, peek));
  783. if (likely(!newerr)) {
  784. /* Had a previous error? Invalidate this fragment. */
  785. if (unlikely(err))
  786. xen_netbk_idx_release(vif, pending_idx,
  787. XEN_NETIF_RSP_OKAY);
  788. continue;
  789. }
  790. /* Error on this fragment: respond to client with an error. */
  791. xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
  792. /* Not the first error? Preceding frags already invalidated. */
  793. if (err)
  794. continue;
  795. /* First error: invalidate header and preceding fragments. */
  796. pending_idx = *((u16 *)skb->data);
  797. xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
  798. for (j = start; j < i; j++) {
  799. pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  800. xen_netbk_idx_release(vif, pending_idx,
  801. XEN_NETIF_RSP_OKAY);
  802. }
  803. /* Remember the error: invalidate all subsequent fragments. */
  804. err = newerr;
  805. }
  806. *gopp = gop + 1;
  807. return err;
  808. }
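/* Annotation, not part of the original source: the return value is the status
 * of the first failing grant copy (zero if all succeeded). Once an error is
 * seen, slots that copied successfully are released with XEN_NETIF_RSP_OKAY
 * and failing slots with XEN_NETIF_RSP_ERROR, so the caller only has to drop
 * the skb.
 */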
  809. static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
  810. {
  811. struct skb_shared_info *shinfo = skb_shinfo(skb);
  812. int nr_frags = shinfo->nr_frags;
  813. int i;
  814. for (i = 0; i < nr_frags; i++) {
  815. skb_frag_t *frag = shinfo->frags + i;
  816. struct xen_netif_tx_request *txp;
  817. struct page *page;
  818. u16 pending_idx;
  819. pending_idx = frag_get_pending_idx(frag);
  820. txp = &vif->pending_tx_info[pending_idx].req;
  821. page = virt_to_page(idx_to_kaddr(vif, pending_idx));
  822. __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
  823. skb->len += txp->size;
  824. skb->data_len += txp->size;
  825. skb->truesize += txp->size;
  826. /* Take an extra reference to offset xen_netbk_idx_release */
  827. get_page(vif->mmap_pages[pending_idx]);
  828. xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
  829. }
  830. }
  831. static int xen_netbk_get_extras(struct xenvif *vif,
  832. struct xen_netif_extra_info *extras,
  833. int work_to_do)
  834. {
  835. struct xen_netif_extra_info extra;
  836. RING_IDX cons = vif->tx.req_cons;
  837. do {
  838. if (unlikely(work_to_do-- <= 0)) {
  839. netdev_err(vif->dev, "Missing extra info\n");
  840. netbk_fatal_tx_err(vif);
  841. return -EBADR;
  842. }
  843. memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
  844. sizeof(extra));
  845. if (unlikely(!extra.type ||
  846. extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
  847. vif->tx.req_cons = ++cons;
  848. netdev_err(vif->dev,
  849. "Invalid extra type: %d\n", extra.type);
  850. netbk_fatal_tx_err(vif);
  851. return -EINVAL;
  852. }
  853. memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
  854. vif->tx.req_cons = ++cons;
  855. } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
  856. return work_to_do;
  857. }
  858. static int netbk_set_skb_gso(struct xenvif *vif,
  859. struct sk_buff *skb,
  860. struct xen_netif_extra_info *gso)
  861. {
  862. if (!gso->u.gso.size) {
  863. netdev_err(vif->dev, "GSO size must not be zero.\n");
  864. netbk_fatal_tx_err(vif);
  865. return -EINVAL;
  866. }
  867. /* Currently only TCPv4 S.O. is supported. */
  868. if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
  869. netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
  870. netbk_fatal_tx_err(vif);
  871. return -EINVAL;
  872. }
  873. skb_shinfo(skb)->gso_size = gso->u.gso.size;
  874. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  875. /* Header must be checked, and gso_segs computed. */
  876. skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
  877. skb_shinfo(skb)->gso_segs = 0;
  878. return 0;
  879. }
  880. static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
  881. {
  882. struct iphdr *iph;
  883. int err = -EPROTO;
  884. int recalculate_partial_csum = 0;
  885. /*
  886. * A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
  887. * peers can fail to set NETRXF_csum_blank when sending a GSO
  888. * frame. In this case force the SKB to CHECKSUM_PARTIAL and
  889. * recalculate the partial checksum.
  890. */
  891. if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
  892. vif->rx_gso_checksum_fixup++;
  893. skb->ip_summed = CHECKSUM_PARTIAL;
  894. recalculate_partial_csum = 1;
  895. }
  896. /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
  897. if (skb->ip_summed != CHECKSUM_PARTIAL)
  898. return 0;
  899. if (skb->protocol != htons(ETH_P_IP))
  900. goto out;
  901. iph = (void *)skb->data;
  902. switch (iph->protocol) {
  903. case IPPROTO_TCP:
  904. if (!skb_partial_csum_set(skb, 4 * iph->ihl,
  905. offsetof(struct tcphdr, check)))
  906. goto out;
  907. if (recalculate_partial_csum) {
  908. struct tcphdr *tcph = tcp_hdr(skb);
  909. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  910. skb->len - iph->ihl*4,
  911. IPPROTO_TCP, 0);
  912. }
  913. break;
  914. case IPPROTO_UDP:
  915. if (!skb_partial_csum_set(skb, 4 * iph->ihl,
  916. offsetof(struct udphdr, check)))
  917. goto out;
  918. if (recalculate_partial_csum) {
  919. struct udphdr *udph = udp_hdr(skb);
  920. udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  921. skb->len - iph->ihl*4,
  922. IPPROTO_UDP, 0);
  923. }
  924. break;
  925. default:
  926. if (net_ratelimit())
  927. netdev_err(vif->dev,
  928. "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
  929. iph->protocol);
  930. goto out;
  931. }
  932. err = 0;
  933. out:
  934. return err;
  935. }
  936. static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
  937. {
  938. unsigned long now = jiffies;
  939. unsigned long next_credit =
  940. vif->credit_timeout.expires +
  941. msecs_to_jiffies(vif->credit_usec / 1000);
  942. /* Timer could already be pending in rare cases. */
  943. if (timer_pending(&vif->credit_timeout))
  944. return true;
  945. /* Passed the point where we can replenish credit? */
  946. if (time_after_eq(now, next_credit)) {
  947. vif->credit_timeout.expires = now;
  948. tx_add_credit(vif);
  949. }
  950. /* Still too big to send right now? Set a callback. */
  951. if (size > vif->remaining_credit) {
  952. vif->credit_timeout.data =
  953. (unsigned long)vif;
  954. vif->credit_timeout.function =
  955. tx_credit_callback;
  956. mod_timer(&vif->credit_timeout,
  957. next_credit);
  958. return true;
  959. }
  960. return false;
  961. }
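/* Annotation, not part of the original source: credit is replenished at most
 * once per vif->credit_usec microseconds (rounded to milliseconds here); if
 * the packet still does not fit in remaining_credit, the timer is armed for
 * the next replenish point and the packet is held back until
 * tx_credit_callback() runs.
 */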
  962. static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
  963. {
  964. struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
  965. struct sk_buff *skb;
  966. int ret;
  967. while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
  968. < MAX_PENDING_REQS)) {
  969. struct xen_netif_tx_request txreq;
  970. struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
  971. struct page *page;
  972. struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
  973. u16 pending_idx;
  974. RING_IDX idx;
  975. int work_to_do;
  976. unsigned int data_len;
  977. pending_ring_idx_t index;
  978. if (vif->tx.sring->req_prod - vif->tx.req_cons >
  979. XEN_NETIF_TX_RING_SIZE) {
  980. netdev_err(vif->dev,
  981. "Impossible number of requests. "
  982. "req_prod %d, req_cons %d, size %ld\n",
  983. vif->tx.sring->req_prod, vif->tx.req_cons,
  984. XEN_NETIF_TX_RING_SIZE);
  985. netbk_fatal_tx_err(vif);
  986. continue;
  987. }
  988. RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
  989. if (!work_to_do)
  990. break;
  991. idx = vif->tx.req_cons;
  992. rmb(); /* Ensure that we see the request before we copy it. */
  993. memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
  994. /* Credit-based scheduling. */
  995. if (txreq.size > vif->remaining_credit &&
  996. tx_credit_exceeded(vif, txreq.size))
  997. break;
  998. vif->remaining_credit -= txreq.size;
  999. work_to_do--;
  1000. vif->tx.req_cons = ++idx;
  1001. memset(extras, 0, sizeof(extras));
  1002. if (txreq.flags & XEN_NETTXF_extra_info) {
  1003. work_to_do = xen_netbk_get_extras(vif, extras,
  1004. work_to_do);
  1005. idx = vif->tx.req_cons;
  1006. if (unlikely(work_to_do < 0))
  1007. break;
  1008. }
  1009. ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
  1010. if (unlikely(ret < 0))
  1011. break;
  1012. idx += ret;
  1013. if (unlikely(txreq.size < ETH_HLEN)) {
  1014. netdev_dbg(vif->dev,
  1015. "Bad packet size: %d\n", txreq.size);
  1016. netbk_tx_err(vif, &txreq, idx);
  1017. break;
  1018. }
  1019. /* No crossing a page as the payload mustn't fragment. */
  1020. if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
  1021. netdev_err(vif->dev,
  1022. "txreq.offset: %x, size: %u, end: %lu\n",
  1023. txreq.offset, txreq.size,
  1024. (txreq.offset&~PAGE_MASK) + txreq.size);
  1025. netbk_fatal_tx_err(vif);
  1026. break;
  1027. }
  1028. index = pending_index(vif->pending_cons);
  1029. pending_idx = vif->pending_ring[index];
  1030. data_len = (txreq.size > PKT_PROT_LEN &&
  1031. ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
  1032. PKT_PROT_LEN : txreq.size;
  1033. skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
  1034. GFP_ATOMIC | __GFP_NOWARN);
  1035. if (unlikely(skb == NULL)) {
  1036. netdev_dbg(vif->dev,
  1037. "Can't allocate a skb in start_xmit.\n");
  1038. netbk_tx_err(vif, &txreq, idx);
  1039. break;
  1040. }
  1041. /* Packets passed to netif_rx() must have some headroom. */
  1042. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  1043. if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
  1044. struct xen_netif_extra_info *gso;
  1045. gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
  1046. if (netbk_set_skb_gso(vif, skb, gso)) {
  1047. /* Failure in netbk_set_skb_gso is fatal. */
  1048. kfree_skb(skb);
  1049. break;
  1050. }
  1051. }
  1052. /* XXX could copy straight to head */
  1053. page = xen_netbk_alloc_page(vif, pending_idx);
  1054. if (!page) {
  1055. kfree_skb(skb);
  1056. netbk_tx_err(vif, &txreq, idx);
  1057. break;
  1058. }
  1059. gop->source.u.ref = txreq.gref;
  1060. gop->source.domid = vif->domid;
  1061. gop->source.offset = txreq.offset;
  1062. gop->dest.u.gmfn = virt_to_mfn(page_address(page));
  1063. gop->dest.domid = DOMID_SELF;
  1064. gop->dest.offset = txreq.offset;
  1065. gop->len = txreq.size;
  1066. gop->flags = GNTCOPY_source_gref;
  1067. gop++;
  1068. memcpy(&vif->pending_tx_info[pending_idx].req,
  1069. &txreq, sizeof(txreq));
  1070. vif->pending_tx_info[pending_idx].head = index;
  1071. *((u16 *)skb->data) = pending_idx;
  1072. __skb_put(skb, data_len);
  1073. skb_shinfo(skb)->nr_frags = ret;
  1074. if (data_len < txreq.size) {
  1075. skb_shinfo(skb)->nr_frags++;
  1076. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1077. pending_idx);
  1078. } else {
  1079. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1080. INVALID_PENDING_IDX);
  1081. }
  1082. vif->pending_cons++;
  1083. request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop);
  1084. if (request_gop == NULL) {
  1085. kfree_skb(skb);
  1086. netbk_tx_err(vif, &txreq, idx);
  1087. break;
  1088. }
  1089. gop = request_gop;
  1090. __skb_queue_tail(&vif->tx_queue, skb);
  1091. vif->tx.req_cons = idx;
  1092. if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
  1093. break;
  1094. }
  1095. return gop - vif->tx_copy_ops;
  1096. }
  1097. static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
  1098. {
  1099. struct gnttab_copy *gop = vif->tx_copy_ops;
  1100. struct sk_buff *skb;
  1101. int work_done = 0;
  1102. while (work_done < budget &&
  1103. (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
  1104. struct xen_netif_tx_request *txp;
  1105. u16 pending_idx;
  1106. unsigned data_len;
  1107. pending_idx = *((u16 *)skb->data);
  1108. txp = &vif->pending_tx_info[pending_idx].req;
  1109. /* Check the remap error code. */
  1110. if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) {
  1111. netdev_dbg(vif->dev, "netback grant failed.\n");
  1112. skb_shinfo(skb)->nr_frags = 0;
  1113. kfree_skb(skb);
  1114. continue;
  1115. }
  1116. data_len = skb->len;
  1117. memcpy(skb->data,
  1118. (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
  1119. data_len);
  1120. if (data_len < txp->size) {
  1121. /* Append the packet payload as a fragment. */
  1122. txp->offset += data_len;
  1123. txp->size -= data_len;
  1124. } else {
  1125. /* Schedule a response immediately. */
  1126. xen_netbk_idx_release(vif, pending_idx,
  1127. XEN_NETIF_RSP_OKAY);
  1128. }
  1129. if (txp->flags & XEN_NETTXF_csum_blank)
  1130. skb->ip_summed = CHECKSUM_PARTIAL;
  1131. else if (txp->flags & XEN_NETTXF_data_validated)
  1132. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1133. xen_netbk_fill_frags(vif, skb);
  1134. /*
  1135. * If the initial fragment was < PKT_PROT_LEN then
  1136. * pull through some bytes from the other fragments to
  1137. * increase the linear region to PKT_PROT_LEN bytes.
  1138. */
  1139. if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
  1140. int target = min_t(int, skb->len, PKT_PROT_LEN);
  1141. __pskb_pull_tail(skb, target - skb_headlen(skb));
  1142. }
  1143. skb->dev = vif->dev;
  1144. skb->protocol = eth_type_trans(skb, skb->dev);
  1145. skb_reset_network_header(skb);
  1146. if (checksum_setup(vif, skb)) {
  1147. netdev_dbg(vif->dev,
  1148. "Can't setup checksum in net_tx_action\n");
  1149. kfree_skb(skb);
  1150. continue;
  1151. }
  1152. skb_probe_transport_header(skb, 0);
  1153. vif->dev->stats.rx_bytes += skb->len;
  1154. vif->dev->stats.rx_packets++;
  1155. work_done++;
  1156. netif_receive_skb(skb);
  1157. }
  1158. return work_done;
  1159. }
  1160. /* Called after netfront has transmitted */
  1161. int xen_netbk_tx_action(struct xenvif *vif, int budget)
  1162. {
  1163. unsigned nr_gops;
  1164. int work_done;
  1165. if (unlikely(!tx_work_todo(vif)))
  1166. return 0;
  1167. nr_gops = xen_netbk_tx_build_gops(vif);
  1168. if (nr_gops == 0)
  1169. return 0;
  1170. gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
  1171. work_done = xen_netbk_tx_submit(vif, nr_gops);
  1172. return work_done;
  1173. }
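/* Annotation, not part of the original source: the tx path is two-phase.
 * xen_netbk_tx_build_gops() turns frontend tx requests into grant-copy
 * operations and queues the corresponding skbs, gnttab_batch_copy() performs
 * the copies in a batch, and xen_netbk_tx_submit() checks the results and
 * hands completed skbs to the network stack.
 */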
  1174. static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
  1175. u8 status)
  1176. {
  1177. struct pending_tx_info *pending_tx_info;
  1178. pending_ring_idx_t head;
  1179. u16 peek; /* peek into next tx request */
  1180. BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
  1181. /* Already complete? */
  1182. if (vif->mmap_pages[pending_idx] == NULL)
  1183. return;
  1184. pending_tx_info = &vif->pending_tx_info[pending_idx];
  1185. head = pending_tx_info->head;
  1186. BUG_ON(!pending_tx_is_head(vif, head));
  1187. BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
  1188. do {
  1189. pending_ring_idx_t index;
  1190. pending_ring_idx_t idx = pending_index(head);
  1191. u16 info_idx = vif->pending_ring[idx];
  1192. pending_tx_info = &vif->pending_tx_info[info_idx];
  1193. make_tx_response(vif, &pending_tx_info->req, status);
  1194. /* Setting any number other than
  1195. * INVALID_PENDING_RING_IDX indicates this slot is
  1196. * starting a new packet / ending a previous packet.
  1197. */
  1198. pending_tx_info->head = 0;
  1199. index = pending_index(vif->pending_prod++);
  1200. vif->pending_ring[index] = vif->pending_ring[info_idx];
  1201. peek = vif->pending_ring[pending_index(++head)];
  1202. } while (!pending_tx_is_head(vif, peek));
  1203. put_page(vif->mmap_pages[pending_idx]);
  1204. vif->mmap_pages[pending_idx] = NULL;
  1205. }
  1206. static void make_tx_response(struct xenvif *vif,
  1207. struct xen_netif_tx_request *txp,
  1208. s8 st)
  1209. {
  1210. RING_IDX i = vif->tx.rsp_prod_pvt;
  1211. struct xen_netif_tx_response *resp;
  1212. int notify;
  1213. resp = RING_GET_RESPONSE(&vif->tx, i);
  1214. resp->id = txp->id;
  1215. resp->status = st;
  1216. if (txp->flags & XEN_NETTXF_extra_info)
  1217. RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
  1218. vif->tx.rsp_prod_pvt = ++i;
  1219. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
  1220. if (notify)
  1221. notify_remote_via_irq(vif->tx_irq);
  1222. }
  1223. static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
  1224. u16 id,
  1225. s8 st,
  1226. u16 offset,
  1227. u16 size,
  1228. u16 flags)
  1229. {
  1230. RING_IDX i = vif->rx.rsp_prod_pvt;
  1231. struct xen_netif_rx_response *resp;
  1232. resp = RING_GET_RESPONSE(&vif->rx, i);
  1233. resp->offset = offset;
  1234. resp->flags = flags;
  1235. resp->id = id;
  1236. resp->status = (s16)size;
  1237. if (st < 0)
  1238. resp->status = (s16)st;
  1239. vif->rx.rsp_prod_pvt = ++i;
  1240. return resp;
  1241. }
  1242. static inline int rx_work_todo(struct xenvif *vif)
  1243. {
  1244. return !skb_queue_empty(&vif->rx_queue);
  1245. }
  1246. static inline int tx_work_todo(struct xenvif *vif)
  1247. {
  1248. if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
  1249. (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
  1250. < MAX_PENDING_REQS))
  1251. return 1;
  1252. return 0;
  1253. }
  1254. void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
  1255. {
  1256. if (vif->tx.sring)
  1257. xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
  1258. vif->tx.sring);
  1259. if (vif->rx.sring)
  1260. xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
  1261. vif->rx.sring);
  1262. }
  1263. int xen_netbk_map_frontend_rings(struct xenvif *vif,
  1264. grant_ref_t tx_ring_ref,
  1265. grant_ref_t rx_ring_ref)
  1266. {
  1267. void *addr;
  1268. struct xen_netif_tx_sring *txs;
  1269. struct xen_netif_rx_sring *rxs;
  1270. int err = -ENOMEM;
  1271. err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
  1272. tx_ring_ref, &addr);
  1273. if (err)
  1274. goto err;
  1275. txs = (struct xen_netif_tx_sring *)addr;
  1276. BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
  1277. err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
  1278. rx_ring_ref, &addr);
  1279. if (err)
  1280. goto err;
  1281. rxs = (struct xen_netif_rx_sring *)addr;
  1282. BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
  1283. vif->rx_req_cons_peek = 0;
  1284. return 0;
  1285. err:
  1286. xen_netbk_unmap_frontend_rings(vif);
  1287. return err;
  1288. }
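/* Annotation, not part of the original source: each shared ring occupies a
 * single page, so BACK_RING_INIT(..., PAGE_SIZE) sizes the ring from that
 * page; with 4 KiB pages this works out to 256 entries per ring
 * (XEN_NETIF_TX_RING_SIZE / XEN_NETIF_RX_RING_SIZE).
 */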
  1289. int xen_netbk_kthread(void *data)
  1290. {
  1291. struct xenvif *vif = data;
  1292. while (!kthread_should_stop()) {
  1293. wait_event_interruptible(vif->wq,
  1294. rx_work_todo(vif) ||
  1295. kthread_should_stop());
  1296. if (kthread_should_stop())
  1297. break;
  1298. if (rx_work_todo(vif))
  1299. xen_netbk_rx_action(vif);
  1300. cond_resched();
  1301. }
  1302. return 0;
  1303. }
  1304. static int __init netback_init(void)
  1305. {
  1306. int rc = 0;
  1307. if (!xen_domain())
  1308. return -ENODEV;
  1309. if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
  1310. pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
  1311. fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  1312. fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
  1313. }
  1314. rc = xenvif_xenbus_init();
  1315. if (rc)
  1316. goto failed_init;
  1317. return 0;
  1318. failed_init:
  1319. return rc;
  1320. }
  1321. module_init(netback_init);
  1322. static void __exit netback_fini(void)
  1323. {
  1324. xenvif_xenbus_fini();
  1325. }
  1326. module_exit(netback_fini);
  1327. MODULE_LICENSE("Dual BSD/GPL");
  1328. MODULE_ALIAS("xen-backend:vif");