netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);

/*
 * This is the maximum number of slots a skb can have. If a guest sends
 * a skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum number of slots a valid packet can use. Now this value is
 * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 * supported by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests, otherwise it is the continuation of the
 * previous tx request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

/*
 * This is the amount of a packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN	(ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}
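
/*
 * Worst-case number of RX ring slots needed to receive a single skb:
 * enough slots to cover the device MTU, plus room for the frags and
 * one extra_info slot when SG/GSO is in use.
 */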
static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}

int xenvif_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}

int xenvif_must_stop_queue(struct xenvif *vif)
{
	if (!xenvif_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xenvif_rx_ring_full(vif);
}
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies; this tends to give large frags
	 * their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
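
/*
 * State carried between calls to xenvif_count_frag_slots() so that
 * successive fragments of the same skb share partially filled buffers,
 * mirroring the packing done later by xenvif_gop_frag_copy().
 */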
struct xenvif_count_slot_state {
	unsigned long copy_off;
	bool head;
};

unsigned int xenvif_count_frag_slots(struct xenvif *vif,
				     unsigned long offset, unsigned long size,
				     struct xenvif_count_slot_state *state)
{
	unsigned count = 0;

	offset &= ~PAGE_MASK;

	while (size > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
			count++;
			state->copy_off = 0;
		}

		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - state->copy_off;

		state->copy_off += bytes;

		offset += bytes;
		size -= bytes;

		if (offset == PAGE_SIZE)
			offset = 0;

		state->head = false;
	}

	return count;
}
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * xenvif_gop_frag_copy.
 */
unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	struct xenvif_count_slot_state state;
	unsigned int count;
	unsigned char *data;
	unsigned i;

	state.head = true;
	state.copy_off = 0;

	/* Slot for the first (partial) page of data. */
	count = 1;

	/* Need a slot for the GSO prefix for GSO extra data? */
	if (skb_shinfo(skb)->gso_size)
		count++;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned long offset = offset_in_page(data);
		unsigned long size = PAGE_SIZE - offset;

		if (data + size > skb_tail_pointer(skb))
			size = skb_tail_pointer(skb) - data;

		count += xenvif_count_frag_slots(vif, offset, size, &state);

		data += size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;

		count += xenvif_count_frag_slots(vif, offset, size, &state);
	}

	return count;
}
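
/*
 * Book-keeping for one batch of RX work: the grant-copy operations
 * queued so far, the meta slots describing each guest RX buffer, and
 * the fill level of the buffer currently being packed.
 */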
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(vif, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(vif, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void xenvif_add_frag_responses(struct xenvif *vif, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

struct skb_cb_overlay {
	int meta_slots_used;
};

static void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}
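
/*
 * Drain vif->rx_queue: build grant-copy operations for each queued skb,
 * issue them in one batch with gnttab_batch_copy(), then write the
 * responses (including any GSO extra_info slots) into the RX ring and
 * notify the frontend if required.
 */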
void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	int need_to_notify = 0;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		if (ret)
			need_to_notify = 1;

		xenvif_notify_tx_completion(vif);

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);

	/* More work to do? */
	if (!skb_queue_empty(&vif->rx_queue))
		xenvif_kick_thread(vif);
}
void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
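
/*
 * Credit-based TX scheduling: each vif may transmit up to
 * vif->credit_bytes per credit_usec window. tx_add_credit() tops the
 * allowance back up, clamped so a single jumbo packet can still get
 * through.
 */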
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xenvif_check_rx_xenvif(vif);
}
static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}
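
/*
 * Walk the chain of XEN_NETTXF_more_data slots that follows @first,
 * copying them into @txp. Returns the number of extra slots consumed,
 * or a negative error: fatal protocol violations disable the vif,
 * while oversized-but-tolerated packets are dropped with an error
 * response.
 */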
static int xenvif_count_requests(struct xenvif *vif,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}
static struct page *xenvif_alloc_page(struct xenvif *vif,
				      u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
	if (!page)
		return NULL;
	vif->mmap_pages[pending_idx] = page;

	return page;
}
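
/*
 * Build grant-copy operations for the non-linear part of an skb:
 * successive tx request slots are coalesced into freshly allocated
 * backend pages so that, after copying, the skb never has more than
 * MAX_SKB_FRAGS fragments even if the frontend used more slots.
 */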
static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
					       struct sk_buff *skb,
					       struct xen_netif_tx_request *txp,
					       struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in xenvif_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;

		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `else` branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(vif->pending_cons++);

				pending_idx = vif->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				vif->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		vif->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xenvif_idx_release(vif,
				   frag_get_pending_idx(&frags[shinfo->nr_frags]),
				   XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}
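
/*
 * Check the grant-copy status for the header and each fragment of one
 * skb. If any copy failed, all of the packet's pending slots are
 * released here (the failed ones with an error response, the rest
 * invalidated with an OKAY status) and the first error is returned.
 */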
static int xenvif_tx_check_gop(struct xenvif *vif,
			       struct sk_buff *skb,
			       struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &vif->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = vif->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(vif, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xenvif_idx_release(vif, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
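
/*
 * Replace the pending_idx placeholders stored in the skb's frags with
 * the real backend pages and lengths recorded in pending_tx_info, and
 * take an extra page reference so the response sent by
 * xenvif_idx_release() below does not free the page under us.
 */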
static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xenvif_idx_release */
		get_page(vif->mmap_pages[pending_idx]);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}
static int xenvif_get_extras(struct xenvif *vif,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
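
/*
 * Fill in the partial-checksum metadata for IPv4 TCP/UDP packets coming
 * from the frontend, recalculating the pseudo-header checksum for buggy
 * GSO senders that forgot to mark the packet CHECKSUM_PARTIAL.
 */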
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct tcphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = tcp_hdr(skb);
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct udphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct udphdr *udph = udp_hdr(skb);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	err = 0;

out:
	return err;
}
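
/*
 * Returns true if the vif currently lacks the credit to send a packet
 * of @size bytes; in that case the credit timer is armed so that the
 * TX ring is polled again once the next credit window opens.
 */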
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data = (unsigned long)vif;
		vif->credit_timeout.function = tx_credit_callback;
		mod_timer(&vif->credit_timeout, next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}
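
/*
 * Pull requests off the TX ring, validate them, and turn them into
 * grant-copy operations in vif->tx_copy_ops, queueing a partially
 * built skb for each packet on vif->tx_queue. Stops when the ring is
 * empty, credit runs out, the pending ring is full, or the copy-op
 * array is exhausted. Returns the number of operations built.
 */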
static unsigned xenvif_tx_build_gops(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			continue;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		/* XXX could copy straight to head */
		page = xenvif_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}
static int xenvif_tx_submit(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget &&
	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}

	return work_done;
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_gops;
	int work_done;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	nr_gops = xenvif_tx_build_gops(vif);

	if (nr_gops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	work_done = xenvif_tx_submit(vif, nr_gops);

	return work_done;
}
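
/*
 * Release a completed pending slot: send TX responses for the head
 * request and every continuation slot merged into it, return the
 * entries to the pending ring, and drop the reference on the backing
 * page.
 */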
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (vif->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &vif->pending_tx_info[pending_idx];

	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(vif, head));
	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = vif->pending_ring[idx];

		pending_tx_info = &vif->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(vif->pending_prod++);
		vif->pending_ring[index] = vif->pending_ring[info_idx];

		peek = vif->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(vif, peek));

	put_page(vif->mmap_pages[pending_idx]);
	vif->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xenvif *vif)
{
	return !skb_queue_empty(&vif->rx_queue);
}

static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS))
		return 1;

	return 0;
}
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}
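
/*
 * Per-vif kernel thread: sleeps until xenvif_kick_thread() signals that
 * skbs are waiting on rx_queue, then runs xenvif_rx_action() to push
 * them to the frontend.
 */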
int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (rx_work_todo(vif))
			xenvif_rx_action(vif);

		cond_resched();
	}

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");