netback.c
  1. /*
  2. * Back-end of the driver for virtual network devices. This portion of the
  3. * driver exports a 'unified' network-device interface that can be accessed
  4. * by any operating system that implements a compatible front end. A
  5. * reference front-end implementation can be found in:
  6. * drivers/net/xen-netfront.c
  7. *
  8. * Copyright (c) 2002-2005, K A Fraser
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include "common.h"
  35. #include <linux/kthread.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/udp.h>
  38. #include <net/tcp.h>
  39. #include <xen/xen.h>
  40. #include <xen/events.h>
  41. #include <xen/interface/memory.h>
  42. #include <asm/xen/hypercall.h>
  43. #include <asm/xen/page.h>
  44. /*
  45. * This is the maximum number of slots an skb can have. If a guest
  46. * sends an skb which exceeds this limit it is considered malicious.
  47. */
  48. #define FATAL_SKB_SLOTS_DEFAULT 20
  49. static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  50. module_param(fatal_skb_slots, uint, 0444);
  51. /*
  52. * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
  53. * the maximum slots a valid packet can use. Currently this value is
  54. * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
  55. * supported by all backends.
  56. */
  57. #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
  58. typedef unsigned int pending_ring_idx_t;
  59. #define INVALID_PENDING_RING_IDX (~0U)
  60. struct pending_tx_info {
  61. struct xen_netif_tx_request req; /* coalesced tx request */
  62. struct xenvif *vif;
  63. pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
  64. * if it is head of one or more tx
  65. * reqs
  66. */
  67. };
  68. struct netbk_rx_meta {
  69. int id;
  70. int size;
  71. int gso_size;
  72. };
  73. #define MAX_PENDING_REQS 256
  74. /* Discriminate from any valid pending_idx value. */
  75. #define INVALID_PENDING_IDX 0xFFFF
  76. #define MAX_BUFFER_OFFSET PAGE_SIZE
  77. /* extra field used in struct page */
  78. union page_ext {
  79. struct {
  80. #if BITS_PER_LONG < 64
  81. #define IDX_WIDTH 8
  82. #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
  83. unsigned int group:GROUP_WIDTH;
  84. unsigned int idx:IDX_WIDTH;
  85. #else
  86. unsigned int group, idx;
  87. #endif
  88. } e;
  89. void *mapping;
  90. };
  91. struct xen_netbk {
  92. wait_queue_head_t wq;
  93. struct task_struct *task;
  94. struct sk_buff_head rx_queue;
  95. struct sk_buff_head tx_queue;
  96. struct timer_list net_timer;
  97. struct page *mmap_pages[MAX_PENDING_REQS];
  98. pending_ring_idx_t pending_prod;
  99. pending_ring_idx_t pending_cons;
  100. struct list_head net_schedule_list;
  101. /* Protect the net_schedule_list in netif. */
  102. spinlock_t net_schedule_list_lock;
  103. atomic_t netfront_count;
  104. struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
  105. /* Coalescing tx requests before copying makes the number of grant
  106. * copy ops greater than or equal to the number of slots required. In
  107. * the worst case a tx request consumes 2 gnttab_copy operations.
  108. */
  109. struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
  110. u16 pending_ring[MAX_PENDING_REQS];
  111. /*
  112. * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
  113. * head/fragment page uses 2 copy operations because it
  114. * straddles two buffers in the frontend.
  115. */
  116. struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
  117. struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
  118. };
  119. static struct xen_netbk *xen_netbk;
  120. static int xen_netbk_group_nr;
  121. /*
  122. * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
  123. * one or more merged tx requests, otherwise it is the continuation of
  124. * previous tx request.
  125. */
  126. static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
  127. {
  128. return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
  129. }
  130. void xen_netbk_add_xenvif(struct xenvif *vif)
  131. {
  132. int i;
  133. int min_netfront_count;
  134. int min_group = 0;
  135. struct xen_netbk *netbk;
  136. min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
  137. for (i = 0; i < xen_netbk_group_nr; i++) {
  138. int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
  139. if (netfront_count < min_netfront_count) {
  140. min_group = i;
  141. min_netfront_count = netfront_count;
  142. }
  143. }
  144. netbk = &xen_netbk[min_group];
  145. vif->netbk = netbk;
  146. atomic_inc(&netbk->netfront_count);
  147. }
  148. void xen_netbk_remove_xenvif(struct xenvif *vif)
  149. {
  150. struct xen_netbk *netbk = vif->netbk;
  151. vif->netbk = NULL;
  152. atomic_dec(&netbk->netfront_count);
  153. }
  154. static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
  155. u8 status);
  156. static void make_tx_response(struct xenvif *vif,
  157. struct xen_netif_tx_request *txp,
  158. s8 st);
  159. static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
  160. u16 id,
  161. s8 st,
  162. u16 offset,
  163. u16 size,
  164. u16 flags);
  165. static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
  166. u16 idx)
  167. {
  168. return page_to_pfn(netbk->mmap_pages[idx]);
  169. }
  170. static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
  171. u16 idx)
  172. {
  173. return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
  174. }
  175. /* extra field used in struct page */
  176. static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
  177. unsigned int idx)
  178. {
  179. unsigned int group = netbk - xen_netbk;
  180. union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
  181. BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
  182. pg->mapping = ext.mapping;
  183. }
  184. static int get_page_ext(struct page *pg,
  185. unsigned int *pgroup, unsigned int *pidx)
  186. {
  187. union page_ext ext = { .mapping = pg->mapping };
  188. struct xen_netbk *netbk;
  189. unsigned int group, idx;
  190. group = ext.e.group - 1;
  191. if (group < 0 || group >= xen_netbk_group_nr)
  192. return 0;
  193. netbk = &xen_netbk[group];
  194. idx = ext.e.idx;
  195. if ((idx < 0) || (idx >= MAX_PENDING_REQS))
  196. return 0;
  197. if (netbk->mmap_pages[idx] != pg)
  198. return 0;
  199. *pgroup = group;
  200. *pidx = idx;
  201. return 1;
  202. }
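/*
 * Editor's note (worked example, not part of the original source):
 * set_page_ext() stashes (group + 1, idx) in page->mapping via the
 * page_ext union, and get_page_ext() reverses the encoding.  Group 0 is
 * stored as 1 so a NULL/zero mapping can never look like a valid entry;
 * e.g. for group = 2, idx = 57, get_page_ext() recovers 3 - 1 = 2 and
 * then checks bounds and that netbk->mmap_pages[57] really is this page.
 * Since group and idx are unsigned, the explicit "< 0" tests can never
 * fire, but an unset mapping wraps to a huge value and is still caught
 * by the ">=" bound checks.
 */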
  203. /*
  204. * This is the amount of packet we copy rather than map, so that the
  205. * guest can't fiddle with the contents of the headers while we do
  206. * packet processing on them (netfilter, routing, etc).
  207. */
  208. #define PKT_PROT_LEN (ETH_HLEN + \
  209. VLAN_HLEN + \
  210. sizeof(struct iphdr) + MAX_IPOPTLEN + \
  211. sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
  212. static u16 frag_get_pending_idx(skb_frag_t *frag)
  213. {
  214. return (u16)frag->page_offset;
  215. }
  216. static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
  217. {
  218. frag->page_offset = pending_idx;
  219. }
  220. static inline pending_ring_idx_t pending_index(unsigned i)
  221. {
  222. return i & (MAX_PENDING_REQS-1);
  223. }
  224. static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
  225. {
  226. return MAX_PENDING_REQS -
  227. netbk->pending_prod + netbk->pending_cons;
  228. }
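/*
 * Editor's note (assumes MAX_PENDING_REQS stays a power of two, as the
 * value 256 above is): pending_index() masks with MAX_PENDING_REQS - 1,
 * which equals "i % MAX_PENDING_REQS" only for power-of-two sizes, e.g.
 * pending_index(259) == 259 & 255 == 3.  nr_pending_reqs() is then the
 * number of slots currently consumed but not yet released.
 */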
  229. static void xen_netbk_kick_thread(struct xen_netbk *netbk)
  230. {
  231. wake_up(&netbk->wq);
  232. }
  233. static int max_required_rx_slots(struct xenvif *vif)
  234. {
  235. int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
  236. /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
  237. if (vif->can_sg || vif->gso || vif->gso_prefix)
  238. max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
  239. return max;
  240. }
  241. int xen_netbk_rx_ring_full(struct xenvif *vif)
  242. {
  243. RING_IDX peek = vif->rx_req_cons_peek;
  244. RING_IDX needed = max_required_rx_slots(vif);
  245. return ((vif->rx.sring->req_prod - peek) < needed) ||
  246. ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
  247. }
  248. int xen_netbk_must_stop_queue(struct xenvif *vif)
  249. {
  250. if (!xen_netbk_rx_ring_full(vif))
  251. return 0;
  252. vif->rx.sring->req_event = vif->rx_req_cons_peek +
  253. max_required_rx_slots(vif);
  254. mb(); /* request notification /then/ check the queue */
  255. return xen_netbk_rx_ring_full(vif);
  256. }
  257. /*
  258. * Returns true if we should start a new receive buffer instead of
  259. * adding 'size' bytes to a buffer which currently contains 'offset'
  260. * bytes.
  261. */
  262. static bool start_new_rx_buffer(int offset, unsigned long size, int head)
  263. {
  264. /* simple case: we have completely filled the current buffer. */
  265. if (offset == MAX_BUFFER_OFFSET)
  266. return true;
  267. /*
  268. * complex case: start a fresh buffer if the current frag
  269. * would overflow the current buffer but only if:
  270. * (i) this frag would fit completely in the next buffer
  271. * and (ii) there is already some data in the current buffer
  272. * and (iii) this is not the head buffer.
  273. *
  274. * Where:
  275. * - (i) stops us splitting a frag into two copies
  276. * unless the frag is too large for a single buffer.
  277. * - (ii) stops us from leaving a buffer pointlessly empty.
  278. * - (iii) stops us leaving the first buffer
  279. * empty. Strictly speaking this is already covered
  280. * by (ii) but is explicitly checked because
  281. * netfront relies on the first buffer being
  282. * non-empty and can crash otherwise.
  283. *
  284. * This means we will effectively linearise small
  285. * frags but do not needlessly split large buffers
  286. * into multiple copies; we tend to give large frags
  287. * their own buffers as before.
  288. */
  289. if ((offset + size > MAX_BUFFER_OFFSET) &&
  290. (size <= MAX_BUFFER_OFFSET) && offset && !head)
  291. return true;
  292. return false;
  293. }
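/*
 * Editor's note, a worked example of the rule above (hypothetical
 * numbers, PAGE_SIZE == MAX_BUFFER_OFFSET == 4096):
 *   offset = 3000, size = 2000, head = 0
 *     -> 3000 + 2000 > 4096, 2000 <= 4096, offset != 0, !head,
 *        so a new buffer is started rather than splitting the frag.
 *   offset = 0, size = 6000, head = 0
 *     -> the frag cannot fit in a single buffer (condition (i) fails)
 *        and the buffer is still empty, so false is returned and the
 *        copy is split across buffers instead.
 */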
  294. /*
  295. * Figure out how many ring slots we're going to need to send @skb to
  296. * the guest. This function is essentially a dry run of
  297. * netbk_gop_frag_copy.
  298. */
  299. unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
  300. {
  301. unsigned int count;
  302. int i, copy_off;
  303. count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
  304. copy_off = skb_headlen(skb) % PAGE_SIZE;
  305. if (skb_shinfo(skb)->gso_size)
  306. count++;
  307. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  308. unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  309. unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
  310. unsigned long bytes;
  311. offset &= ~PAGE_MASK;
  312. while (size > 0) {
  313. BUG_ON(offset >= PAGE_SIZE);
  314. BUG_ON(copy_off > MAX_BUFFER_OFFSET);
  315. bytes = PAGE_SIZE - offset;
  316. if (bytes > size)
  317. bytes = size;
  318. if (start_new_rx_buffer(copy_off, bytes, 0)) {
  319. count++;
  320. copy_off = 0;
  321. }
  322. if (copy_off + bytes > MAX_BUFFER_OFFSET)
  323. bytes = MAX_BUFFER_OFFSET - copy_off;
  324. copy_off += bytes;
  325. offset += bytes;
  326. size -= bytes;
  327. if (offset == PAGE_SIZE)
  328. offset = 0;
  329. }
  330. }
  331. return count;
  332. }
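/*
 * Editor's note (illustrative dry run, hypothetical sizes, 4 KiB pages):
 * for an skb with a 2000-byte linear area, gso_size != 0 and a single
 * 5000-byte fragment starting at page offset 0, the walk above yields
 * count = 1 (header) + 1 (GSO) and then 2 more slots for the fragment:
 * a new buffer is started at copy_off == 2000 and again once the first
 * fragment buffer fills to 4096, i.e. 4 slots in total.
 */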
  333. struct netrx_pending_operations {
  334. unsigned copy_prod, copy_cons;
  335. unsigned meta_prod, meta_cons;
  336. struct gnttab_copy *copy;
  337. struct netbk_rx_meta *meta;
  338. int copy_off;
  339. grant_ref_t copy_gref;
  340. };
  341. static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  342. struct netrx_pending_operations *npo)
  343. {
  344. struct netbk_rx_meta *meta;
  345. struct xen_netif_rx_request *req;
  346. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  347. meta = npo->meta + npo->meta_prod++;
  348. meta->gso_size = 0;
  349. meta->size = 0;
  350. meta->id = req->id;
  351. npo->copy_off = 0;
  352. npo->copy_gref = req->gref;
  353. return meta;
  354. }
  355. /*
  356. * Set up the grant operations for this fragment. If it's a flipping
  357. * interface, we also set up the unmap request from here.
  358. */
  359. static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  360. struct netrx_pending_operations *npo,
  361. struct page *page, unsigned long size,
  362. unsigned long offset, int *head)
  363. {
  364. struct gnttab_copy *copy_gop;
  365. struct netbk_rx_meta *meta;
  366. /*
  367. * These variables are used iff get_page_ext returns true,
  368. * in which case they are guaranteed to be initialized.
  369. */
  370. unsigned int uninitialized_var(group), uninitialized_var(idx);
  371. int foreign = get_page_ext(page, &group, &idx);
  372. unsigned long bytes;
  373. /* Data must not cross a page boundary. */
  374. BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
  375. meta = npo->meta + npo->meta_prod - 1;
  376. /* Skip unused frames from start of page */
  377. page += offset >> PAGE_SHIFT;
  378. offset &= ~PAGE_MASK;
  379. while (size > 0) {
  380. BUG_ON(offset >= PAGE_SIZE);
  381. BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
  382. bytes = PAGE_SIZE - offset;
  383. if (bytes > size)
  384. bytes = size;
  385. if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
  386. /*
  387. * Netfront requires there to be some data in the head
  388. * buffer.
  389. */
  390. BUG_ON(*head);
  391. meta = get_next_rx_buffer(vif, npo);
  392. }
  393. if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
  394. bytes = MAX_BUFFER_OFFSET - npo->copy_off;
  395. copy_gop = npo->copy + npo->copy_prod++;
  396. copy_gop->flags = GNTCOPY_dest_gref;
  397. if (foreign) {
  398. struct xen_netbk *netbk = &xen_netbk[group];
  399. struct pending_tx_info *src_pend;
  400. src_pend = &netbk->pending_tx_info[idx];
  401. copy_gop->source.domid = src_pend->vif->domid;
  402. copy_gop->source.u.ref = src_pend->req.gref;
  403. copy_gop->flags |= GNTCOPY_source_gref;
  404. } else {
  405. void *vaddr = page_address(page);
  406. copy_gop->source.domid = DOMID_SELF;
  407. copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
  408. }
  409. copy_gop->source.offset = offset;
  410. copy_gop->dest.domid = vif->domid;
  411. copy_gop->dest.offset = npo->copy_off;
  412. copy_gop->dest.u.ref = npo->copy_gref;
  413. copy_gop->len = bytes;
  414. npo->copy_off += bytes;
  415. meta->size += bytes;
  416. offset += bytes;
  417. size -= bytes;
  418. /* Next frame */
  419. if (offset == PAGE_SIZE && size) {
  420. BUG_ON(!PageCompound(page));
  421. page++;
  422. offset = 0;
  423. }
  424. /* Leave a gap for the GSO descriptor. */
  425. if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
  426. vif->rx.req_cons++;
  427. *head = 0; /* There must be something in this buffer now. */
  428. }
  429. }
  430. /*
  431. * Prepare an SKB to be transmitted to the frontend.
  432. *
  433. * This function is responsible for allocating grant operations, meta
  434. * structures, etc.
  435. *
  436. * It returns the number of meta structures consumed. The number of
  437. * ring slots used is always equal to the number of meta slots used
  438. * plus the number of GSO descriptors used. Currently, we use either
  439. * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  440. * frontend-side LRO).
  441. */
  442. static int netbk_gop_skb(struct sk_buff *skb,
  443. struct netrx_pending_operations *npo)
  444. {
  445. struct xenvif *vif = netdev_priv(skb->dev);
  446. int nr_frags = skb_shinfo(skb)->nr_frags;
  447. int i;
  448. struct xen_netif_rx_request *req;
  449. struct netbk_rx_meta *meta;
  450. unsigned char *data;
  451. int head = 1;
  452. int old_meta_prod;
  453. old_meta_prod = npo->meta_prod;
  454. /* Set up a GSO prefix descriptor, if necessary */
  455. if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
  456. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  457. meta = npo->meta + npo->meta_prod++;
  458. meta->gso_size = skb_shinfo(skb)->gso_size;
  459. meta->size = 0;
  460. meta->id = req->id;
  461. }
  462. req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
  463. meta = npo->meta + npo->meta_prod++;
  464. if (!vif->gso_prefix)
  465. meta->gso_size = skb_shinfo(skb)->gso_size;
  466. else
  467. meta->gso_size = 0;
  468. meta->size = 0;
  469. meta->id = req->id;
  470. npo->copy_off = 0;
  471. npo->copy_gref = req->gref;
  472. data = skb->data;
  473. while (data < skb_tail_pointer(skb)) {
  474. unsigned int offset = offset_in_page(data);
  475. unsigned int len = PAGE_SIZE - offset;
  476. if (data + len > skb_tail_pointer(skb))
  477. len = skb_tail_pointer(skb) - data;
  478. netbk_gop_frag_copy(vif, skb, npo,
  479. virt_to_page(data), len, offset, &head);
  480. data += len;
  481. }
  482. for (i = 0; i < nr_frags; i++) {
  483. netbk_gop_frag_copy(vif, skb, npo,
  484. skb_frag_page(&skb_shinfo(skb)->frags[i]),
  485. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  486. skb_shinfo(skb)->frags[i].page_offset,
  487. &head);
  488. }
  489. return npo->meta_prod - old_meta_prod;
  490. }
  491. /*
  492. * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
  493. * used to set up the operations on the top of
  494. * netrx_pending_operations, which have since been done. Check that
  495. * they didn't give any errors and advance over them.
  496. */
  497. static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
  498. struct netrx_pending_operations *npo)
  499. {
  500. struct gnttab_copy *copy_op;
  501. int status = XEN_NETIF_RSP_OKAY;
  502. int i;
  503. for (i = 0; i < nr_meta_slots; i++) {
  504. copy_op = npo->copy + npo->copy_cons++;
  505. if (copy_op->status != GNTST_okay) {
  506. netdev_dbg(vif->dev,
  507. "Bad status %d from copy to DOM%d.\n",
  508. copy_op->status, vif->domid);
  509. status = XEN_NETIF_RSP_ERROR;
  510. }
  511. }
  512. return status;
  513. }
  514. static void netbk_add_frag_responses(struct xenvif *vif, int status,
  515. struct netbk_rx_meta *meta,
  516. int nr_meta_slots)
  517. {
  518. int i;
  519. unsigned long offset;
  520. /* No fragments used */
  521. if (nr_meta_slots <= 1)
  522. return;
  523. nr_meta_slots--;
  524. for (i = 0; i < nr_meta_slots; i++) {
  525. int flags;
  526. if (i == nr_meta_slots - 1)
  527. flags = 0;
  528. else
  529. flags = XEN_NETRXF_more_data;
  530. offset = 0;
  531. make_rx_response(vif, meta[i].id, status, offset,
  532. meta[i].size, flags);
  533. }
  534. }
  535. struct skb_cb_overlay {
  536. int meta_slots_used;
  537. };
  538. static void xen_netbk_rx_action(struct xen_netbk *netbk)
  539. {
  540. struct xenvif *vif = NULL, *tmp;
  541. s8 status;
  542. u16 irq, flags;
  543. struct xen_netif_rx_response *resp;
  544. struct sk_buff_head rxq;
  545. struct sk_buff *skb;
  546. LIST_HEAD(notify);
  547. int ret;
  548. int nr_frags;
  549. int count;
  550. unsigned long offset;
  551. struct skb_cb_overlay *sco;
  552. struct netrx_pending_operations npo = {
  553. .copy = netbk->grant_copy_op,
  554. .meta = netbk->meta,
  555. };
  556. skb_queue_head_init(&rxq);
  557. count = 0;
  558. while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
  559. vif = netdev_priv(skb->dev);
  560. nr_frags = skb_shinfo(skb)->nr_frags;
  561. sco = (struct skb_cb_overlay *)skb->cb;
  562. sco->meta_slots_used = netbk_gop_skb(skb, &npo);
  563. count += nr_frags + 1;
  564. __skb_queue_tail(&rxq, skb);
  565. /* Filled the batch queue? */
  566. /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
  567. if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
  568. break;
  569. }
  570. BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
  571. if (!npo.copy_prod)
  572. return;
  573. BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
  574. gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
  575. while ((skb = __skb_dequeue(&rxq)) != NULL) {
  576. sco = (struct skb_cb_overlay *)skb->cb;
  577. vif = netdev_priv(skb->dev);
  578. if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
  579. resp = RING_GET_RESPONSE(&vif->rx,
  580. vif->rx.rsp_prod_pvt++);
  581. resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
  582. resp->offset = netbk->meta[npo.meta_cons].gso_size;
  583. resp->id = netbk->meta[npo.meta_cons].id;
  584. resp->status = sco->meta_slots_used;
  585. npo.meta_cons++;
  586. sco->meta_slots_used--;
  587. }
  588. vif->dev->stats.tx_bytes += skb->len;
  589. vif->dev->stats.tx_packets++;
  590. status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
  591. if (sco->meta_slots_used == 1)
  592. flags = 0;
  593. else
  594. flags = XEN_NETRXF_more_data;
  595. if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
  596. flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
  597. else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  598. /* remote but checksummed. */
  599. flags |= XEN_NETRXF_data_validated;
  600. offset = 0;
  601. resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
  602. status, offset,
  603. netbk->meta[npo.meta_cons].size,
  604. flags);
  605. if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
  606. struct xen_netif_extra_info *gso =
  607. (struct xen_netif_extra_info *)
  608. RING_GET_RESPONSE(&vif->rx,
  609. vif->rx.rsp_prod_pvt++);
  610. resp->flags |= XEN_NETRXF_extra_info;
  611. gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
  612. gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
  613. gso->u.gso.pad = 0;
  614. gso->u.gso.features = 0;
  615. gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
  616. gso->flags = 0;
  617. }
  618. netbk_add_frag_responses(vif, status,
  619. netbk->meta + npo.meta_cons + 1,
  620. sco->meta_slots_used);
  621. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
  622. irq = vif->irq;
  623. if (ret && list_empty(&vif->notify_list))
  624. list_add_tail(&vif->notify_list, &notify);
  625. xenvif_notify_tx_completion(vif);
  626. xenvif_put(vif);
  627. npo.meta_cons += sco->meta_slots_used;
  628. dev_kfree_skb(skb);
  629. }
  630. list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
  631. notify_remote_via_irq(vif->irq);
  632. list_del_init(&vif->notify_list);
  633. }
  634. /* More work to do? */
  635. if (!skb_queue_empty(&netbk->rx_queue) &&
  636. !timer_pending(&netbk->net_timer))
  637. xen_netbk_kick_thread(netbk);
  638. }
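/*
 * Editor's note (summary of the code above, no new behaviour implied):
 * the RX path works in two passes.  Pass one drains netbk->rx_queue,
 * building one gnttab_copy per destination buffer via netbk_gop_skb()
 * until the batch would no longer fit in the ring.  After a single
 * gnttab_batch_copy() hypercall, pass two walks the same skbs again,
 * turning meta slots into ring responses (GSO prefix/extra slots
 * included) and notifying each vif at most once via the notify list.
 */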
  639. void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
  640. {
  641. struct xen_netbk *netbk = vif->netbk;
  642. skb_queue_tail(&netbk->rx_queue, skb);
  643. xen_netbk_kick_thread(netbk);
  644. }
  645. static void xen_netbk_alarm(unsigned long data)
  646. {
  647. struct xen_netbk *netbk = (struct xen_netbk *)data;
  648. xen_netbk_kick_thread(netbk);
  649. }
  650. static int __on_net_schedule_list(struct xenvif *vif)
  651. {
  652. return !list_empty(&vif->schedule_list);
  653. }
  654. /* Must be called with net_schedule_list_lock held */
  655. static void remove_from_net_schedule_list(struct xenvif *vif)
  656. {
  657. if (likely(__on_net_schedule_list(vif))) {
  658. list_del_init(&vif->schedule_list);
  659. xenvif_put(vif);
  660. }
  661. }
  662. static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
  663. {
  664. struct xenvif *vif = NULL;
  665. spin_lock_irq(&netbk->net_schedule_list_lock);
  666. if (list_empty(&netbk->net_schedule_list))
  667. goto out;
  668. vif = list_first_entry(&netbk->net_schedule_list,
  669. struct xenvif, schedule_list);
  670. if (!vif)
  671. goto out;
  672. xenvif_get(vif);
  673. remove_from_net_schedule_list(vif);
  674. out:
  675. spin_unlock_irq(&netbk->net_schedule_list_lock);
  676. return vif;
  677. }
  678. void xen_netbk_schedule_xenvif(struct xenvif *vif)
  679. {
  680. unsigned long flags;
  681. struct xen_netbk *netbk = vif->netbk;
  682. if (__on_net_schedule_list(vif))
  683. goto kick;
  684. spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
  685. if (!__on_net_schedule_list(vif) &&
  686. likely(xenvif_schedulable(vif))) {
  687. list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
  688. xenvif_get(vif);
  689. }
  690. spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
  691. kick:
  692. smp_mb();
  693. if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
  694. !list_empty(&netbk->net_schedule_list))
  695. xen_netbk_kick_thread(netbk);
  696. }
  697. void xen_netbk_deschedule_xenvif(struct xenvif *vif)
  698. {
  699. struct xen_netbk *netbk = vif->netbk;
  700. spin_lock_irq(&netbk->net_schedule_list_lock);
  701. remove_from_net_schedule_list(vif);
  702. spin_unlock_irq(&netbk->net_schedule_list_lock);
  703. }
  704. void xen_netbk_check_rx_xenvif(struct xenvif *vif)
  705. {
  706. int more_to_do;
  707. RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
  708. if (more_to_do)
  709. xen_netbk_schedule_xenvif(vif);
  710. }
  711. static void tx_add_credit(struct xenvif *vif)
  712. {
  713. unsigned long max_burst, max_credit;
  714. /*
  715. * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
  716. * Otherwise the interface can seize up due to insufficient credit.
  717. */
  718. max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
  719. max_burst = min(max_burst, 131072UL);
  720. max_burst = max(max_burst, vif->credit_bytes);
  721. /* Take care that adding a new chunk of credit doesn't wrap to zero. */
  722. max_credit = vif->remaining_credit + vif->credit_bytes;
  723. if (max_credit < vif->remaining_credit)
  724. max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
  725. vif->remaining_credit = min(max_credit, max_burst);
  726. }
  727. static void tx_credit_callback(unsigned long data)
  728. {
  729. struct xenvif *vif = (struct xenvif *)data;
  730. tx_add_credit(vif);
  731. xen_netbk_check_rx_xenvif(vif);
  732. }
  733. static void netbk_tx_err(struct xenvif *vif,
  734. struct xen_netif_tx_request *txp, RING_IDX end)
  735. {
  736. RING_IDX cons = vif->tx.req_cons;
  737. do {
  738. make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
  739. if (cons == end)
  740. break;
  741. txp = RING_GET_REQUEST(&vif->tx, cons++);
  742. } while (1);
  743. vif->tx.req_cons = cons;
  744. xen_netbk_check_rx_xenvif(vif);
  745. xenvif_put(vif);
  746. }
  747. static void netbk_fatal_tx_err(struct xenvif *vif)
  748. {
  749. netdev_err(vif->dev, "fatal error; disabling device\n");
  750. xenvif_carrier_off(vif);
  751. xenvif_put(vif);
  752. }
  753. static int netbk_count_requests(struct xenvif *vif,
  754. struct xen_netif_tx_request *first,
  755. struct xen_netif_tx_request *txp,
  756. int work_to_do)
  757. {
  758. RING_IDX cons = vif->tx.req_cons;
  759. int slots = 0;
  760. int drop_err = 0;
  761. int more_data;
  762. if (!(first->flags & XEN_NETTXF_more_data))
  763. return 0;
  764. do {
  765. struct xen_netif_tx_request dropped_tx = { 0 };
  766. if (slots >= work_to_do) {
  767. netdev_err(vif->dev,
  768. "Asked for %d slots but exceeds this limit\n",
  769. work_to_do);
  770. netbk_fatal_tx_err(vif);
  771. return -ENODATA;
  772. }
  773. /* This guest is really using too many slots and is
  774. * considered malicious.
  775. */
  776. if (unlikely(slots >= fatal_skb_slots)) {
  777. netdev_err(vif->dev,
  778. "Malicious frontend using %d slots, threshold %u\n",
  779. slots, fatal_skb_slots);
  780. netbk_fatal_tx_err(vif);
  781. return -E2BIG;
  782. }
  783. /* Xen network protocol had an implicit dependency on
  784. * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  785. * the historical MAX_SKB_FRAGS value 18 to honor the
  786. * same behavior as before. Any packet using more than
  787. * 18 slots but fewer than fatal_skb_slots slots is
  788. * dropped.
  789. */
  790. if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
  791. if (net_ratelimit())
  792. netdev_dbg(vif->dev,
  793. "Too many slots (%d) exceeding limit (%d), dropping packet\n",
  794. slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  795. drop_err = -E2BIG;
  796. }
  797. if (drop_err)
  798. txp = &dropped_tx;
  799. memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
  800. sizeof(*txp));
  801. /* If the guest submitted a frame >= 64 KiB then
  802. * first->size overflowed and following slots will
  803. * appear to be larger than the frame.
  804. *
  805. * This cannot be a fatal error as there are buggy
  806. * frontends that do this.
  807. *
  808. * Consume all slots and drop the packet.
  809. */
  810. if (!drop_err && txp->size > first->size) {
  811. if (net_ratelimit())
  812. netdev_dbg(vif->dev,
  813. "Invalid tx request, slot size %u > remaining size %u\n",
  814. txp->size, first->size);
  815. drop_err = -EIO;
  816. }
  817. first->size -= txp->size;
  818. slots++;
  819. if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
  820. netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
  821. txp->offset, txp->size);
  822. netbk_fatal_tx_err(vif);
  823. return -EINVAL;
  824. }
  825. more_data = txp->flags & XEN_NETTXF_more_data;
  826. if (!drop_err)
  827. txp++;
  828. } while (more_data);
  829. if (drop_err) {
  830. netbk_tx_err(vif, first, cons + slots);
  831. return drop_err;
  832. }
  833. return slots;
  834. }
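/*
 * Editor's note on the return convention of netbk_count_requests()
 * (derived from the code above): a negative value means the packet was
 * not accepted, either because of a fatal frontend error (in which case
 * netbk_fatal_tx_err() has already been called) or a non-fatal drop for
 * which netbk_tx_err() has already sent error responses.  A return of
 * 0 or more is the number of follow-on slots, beyond the first request,
 * that make up the packet.
 */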
  835. static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
  836. u16 pending_idx)
  837. {
  838. struct page *page;
  839. page = alloc_page(GFP_KERNEL|__GFP_COLD);
  840. if (!page)
  841. return NULL;
  842. set_page_ext(page, netbk, pending_idx);
  843. netbk->mmap_pages[pending_idx] = page;
  844. return page;
  845. }
  846. static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
  847. struct xenvif *vif,
  848. struct sk_buff *skb,
  849. struct xen_netif_tx_request *txp,
  850. struct gnttab_copy *gop)
  851. {
  852. struct skb_shared_info *shinfo = skb_shinfo(skb);
  853. skb_frag_t *frags = shinfo->frags;
  854. u16 pending_idx = *((u16 *)skb->data);
  855. u16 head_idx = 0;
  856. int slot, start;
  857. struct page *page;
  858. pending_ring_idx_t index, start_idx = 0;
  859. uint16_t dst_offset;
  860. unsigned int nr_slots;
  861. struct pending_tx_info *first = NULL;
  862. /* At this point shinfo->nr_frags is in fact the number of
  863. * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
  864. */
  865. nr_slots = shinfo->nr_frags;
  866. /* Skip first skb fragment if it is on same page as header fragment. */
  867. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  868. /* Coalesce tx requests, at this point the packet passed in
  869. * should be <= 64K. Any packets larger than 64K have been
  870. * handled in netbk_count_requests().
  871. */
  872. for (shinfo->nr_frags = slot = start; slot < nr_slots;
  873. shinfo->nr_frags++) {
  874. struct pending_tx_info *pending_tx_info =
  875. netbk->pending_tx_info;
  876. page = alloc_page(GFP_KERNEL|__GFP_COLD);
  877. if (!page)
  878. goto err;
  879. dst_offset = 0;
  880. first = NULL;
  881. while (dst_offset < PAGE_SIZE && slot < nr_slots) {
  882. gop->flags = GNTCOPY_source_gref;
  883. gop->source.u.ref = txp->gref;
  884. gop->source.domid = vif->domid;
  885. gop->source.offset = txp->offset;
  886. gop->dest.domid = DOMID_SELF;
  887. gop->dest.offset = dst_offset;
  888. gop->dest.u.gmfn = virt_to_mfn(page_address(page));
  889. if (dst_offset + txp->size > PAGE_SIZE) {
  890. /* This page can only merge a portion
  891. * of the tx request. Do not increment any
  892. * pointer / counter here. The txp
  893. * will be dealt with in future
  894. * rounds, eventually hitting the
  895. * `else` branch.
  896. */
  897. gop->len = PAGE_SIZE - dst_offset;
  898. txp->offset += gop->len;
  899. txp->size -= gop->len;
  900. dst_offset += gop->len; /* quit loop */
  901. } else {
  902. /* This tx request can be merged in the page */
  903. gop->len = txp->size;
  904. dst_offset += gop->len;
  905. index = pending_index(netbk->pending_cons++);
  906. pending_idx = netbk->pending_ring[index];
  907. memcpy(&pending_tx_info[pending_idx].req, txp,
  908. sizeof(*txp));
  909. xenvif_get(vif);
  910. pending_tx_info[pending_idx].vif = vif;
  911. /* Poison these fields, corresponding
  912. * fields for head tx req will be set
  913. * to correct values after the loop.
  914. */
  915. netbk->mmap_pages[pending_idx] = (void *)(~0UL);
  916. pending_tx_info[pending_idx].head =
  917. INVALID_PENDING_RING_IDX;
  918. if (!first) {
  919. first = &pending_tx_info[pending_idx];
  920. start_idx = index;
  921. head_idx = pending_idx;
  922. }
  923. txp++;
  924. slot++;
  925. }
  926. gop++;
  927. }
  928. first->req.offset = 0;
  929. first->req.size = dst_offset;
  930. first->head = start_idx;
  931. set_page_ext(page, netbk, head_idx);
  932. netbk->mmap_pages[head_idx] = page;
  933. frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
  934. }
  935. BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
  936. return gop;
  937. err:
  938. /* Unwind, freeing all pages and sending error responses. */
  939. while (shinfo->nr_frags-- > start) {
  940. xen_netbk_idx_release(netbk,
  941. frag_get_pending_idx(&frags[shinfo->nr_frags]),
  942. XEN_NETIF_RSP_ERROR);
  943. }
  944. /* The head too, if necessary. */
  945. if (start)
  946. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
  947. return NULL;
  948. }
  949. static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
  950. struct sk_buff *skb,
  951. struct gnttab_copy **gopp)
  952. {
  953. struct gnttab_copy *gop = *gopp;
  954. u16 pending_idx = *((u16 *)skb->data);
  955. struct skb_shared_info *shinfo = skb_shinfo(skb);
  956. struct pending_tx_info *tx_info;
  957. int nr_frags = shinfo->nr_frags;
  958. int i, err, start;
  959. u16 peek; /* peek into next tx request */
  960. /* Check status of header. */
  961. err = gop->status;
  962. if (unlikely(err))
  963. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
  964. /* Skip first skb fragment if it is on same page as header fragment. */
  965. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  966. for (i = start; i < nr_frags; i++) {
  967. int j, newerr;
  968. pending_ring_idx_t head;
  969. pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
  970. tx_info = &netbk->pending_tx_info[pending_idx];
  971. head = tx_info->head;
  972. /* Check error status: if okay then remember grant handle. */
  973. do {
  974. newerr = (++gop)->status;
  975. if (newerr)
  976. break;
  977. peek = netbk->pending_ring[pending_index(++head)];
  978. } while (!pending_tx_is_head(netbk, peek));
  979. if (likely(!newerr)) {
  980. /* Had a previous error? Invalidate this fragment. */
  981. if (unlikely(err))
  982. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
  983. continue;
  984. }
  985. /* Error on this fragment: respond to client with an error. */
  986. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
  987. /* Not the first error? Preceding frags already invalidated. */
  988. if (err)
  989. continue;
  990. /* First error: invalidate header and preceding fragments. */
  991. pending_idx = *((u16 *)skb->data);
  992. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
  993. for (j = start; j < i; j++) {
  994. pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  995. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
  996. }
  997. /* Remember the error: invalidate all subsequent fragments. */
  998. err = newerr;
  999. }
  1000. *gopp = gop + 1;
  1001. return err;
  1002. }
  1003. static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
  1004. {
  1005. struct skb_shared_info *shinfo = skb_shinfo(skb);
  1006. int nr_frags = shinfo->nr_frags;
  1007. int i;
  1008. for (i = 0; i < nr_frags; i++) {
  1009. skb_frag_t *frag = shinfo->frags + i;
  1010. struct xen_netif_tx_request *txp;
  1011. struct page *page;
  1012. u16 pending_idx;
  1013. pending_idx = frag_get_pending_idx(frag);
  1014. txp = &netbk->pending_tx_info[pending_idx].req;
  1015. page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
  1016. __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
  1017. skb->len += txp->size;
  1018. skb->data_len += txp->size;
  1019. skb->truesize += txp->size;
  1020. /* Take an extra reference to offset xen_netbk_idx_release */
  1021. get_page(netbk->mmap_pages[pending_idx]);
  1022. xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
  1023. }
  1024. }
  1025. static int xen_netbk_get_extras(struct xenvif *vif,
  1026. struct xen_netif_extra_info *extras,
  1027. int work_to_do)
  1028. {
  1029. struct xen_netif_extra_info extra;
  1030. RING_IDX cons = vif->tx.req_cons;
  1031. do {
  1032. if (unlikely(work_to_do-- <= 0)) {
  1033. netdev_err(vif->dev, "Missing extra info\n");
  1034. netbk_fatal_tx_err(vif);
  1035. return -EBADR;
  1036. }
  1037. memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
  1038. sizeof(extra));
  1039. if (unlikely(!extra.type ||
  1040. extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
  1041. vif->tx.req_cons = ++cons;
  1042. netdev_err(vif->dev,
  1043. "Invalid extra type: %d\n", extra.type);
  1044. netbk_fatal_tx_err(vif);
  1045. return -EINVAL;
  1046. }
  1047. memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
  1048. vif->tx.req_cons = ++cons;
  1049. } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
  1050. return work_to_do;
  1051. }
  1052. static int netbk_set_skb_gso(struct xenvif *vif,
  1053. struct sk_buff *skb,
  1054. struct xen_netif_extra_info *gso)
  1055. {
  1056. if (!gso->u.gso.size) {
  1057. netdev_err(vif->dev, "GSO size must not be zero.\n");
  1058. netbk_fatal_tx_err(vif);
  1059. return -EINVAL;
  1060. }
  1061. /* Currently only TCPv4 S.O. is supported. */
  1062. if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
  1063. netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
  1064. netbk_fatal_tx_err(vif);
  1065. return -EINVAL;
  1066. }
  1067. skb_shinfo(skb)->gso_size = gso->u.gso.size;
  1068. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  1069. /* Header must be checked, and gso_segs computed. */
  1070. skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
  1071. skb_shinfo(skb)->gso_segs = 0;
  1072. return 0;
  1073. }
  1074. static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
  1075. {
  1076. struct iphdr *iph;
  1077. int err = -EPROTO;
  1078. int recalculate_partial_csum = 0;
  1079. /*
  1080. * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
  1081. * peers can fail to set NETRXF_csum_blank when sending a GSO
  1082. * frame. In this case force the SKB to CHECKSUM_PARTIAL and
  1083. * recalculate the partial checksum.
  1084. */
  1085. if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
  1086. vif->rx_gso_checksum_fixup++;
  1087. skb->ip_summed = CHECKSUM_PARTIAL;
  1088. recalculate_partial_csum = 1;
  1089. }
  1090. /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
  1091. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1092. return 0;
  1093. if (skb->protocol != htons(ETH_P_IP))
  1094. goto out;
  1095. iph = (void *)skb->data;
  1096. switch (iph->protocol) {
  1097. case IPPROTO_TCP:
  1098. if (!skb_partial_csum_set(skb, 4 * iph->ihl,
  1099. offsetof(struct tcphdr, check)))
  1100. goto out;
  1101. if (recalculate_partial_csum) {
  1102. struct tcphdr *tcph = tcp_hdr(skb);
  1103. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  1104. skb->len - iph->ihl*4,
  1105. IPPROTO_TCP, 0);
  1106. }
  1107. break;
  1108. case IPPROTO_UDP:
  1109. if (!skb_partial_csum_set(skb, 4 * iph->ihl,
  1110. offsetof(struct udphdr, check)))
  1111. goto out;
  1112. if (recalculate_partial_csum) {
  1113. struct udphdr *udph = udp_hdr(skb);
  1114. udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  1115. skb->len - iph->ihl*4,
  1116. IPPROTO_UDP, 0);
  1117. }
  1118. break;
  1119. default:
  1120. if (net_ratelimit())
  1121. netdev_err(vif->dev,
  1122. "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
  1123. iph->protocol);
  1124. goto out;
  1125. }
  1126. err = 0;
  1127. out:
  1128. return err;
  1129. }
  1130. static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
  1131. {
  1132. unsigned long now = jiffies;
  1133. unsigned long next_credit =
  1134. vif->credit_timeout.expires +
  1135. msecs_to_jiffies(vif->credit_usec / 1000);
  1136. /* Timer could already be pending in rare cases. */
  1137. if (timer_pending(&vif->credit_timeout))
  1138. return true;
  1139. /* Passed the point where we can replenish credit? */
  1140. if (time_after_eq(now, next_credit)) {
  1141. vif->credit_timeout.expires = now;
  1142. tx_add_credit(vif);
  1143. }
  1144. /* Still too big to send right now? Set a callback. */
  1145. if (size > vif->remaining_credit) {
  1146. vif->credit_timeout.data =
  1147. (unsigned long)vif;
  1148. vif->credit_timeout.function =
  1149. tx_credit_callback;
  1150. mod_timer(&vif->credit_timeout,
  1151. next_credit);
  1152. return true;
  1153. }
  1154. return false;
  1155. }
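/*
 * Editor's note (hypothetical numbers for illustration): the credit
 * scheduler above refills remaining_credit with credit_bytes once per
 * credit_usec interval, capped by tx_add_credit() at roughly
 * max(credit_bytes, min(next request size, 128 KiB)).  With, say,
 * credit_bytes = 65536 and credit_usec = 100000, a guest is limited to
 * about 64 KiB per 100 ms of TX traffic; a request that does not fit is
 * deferred by arming credit_timeout rather than being dropped.
 */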
  1156. static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
  1157. {
  1158. struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
  1159. struct sk_buff *skb;
  1160. int ret;
  1161. while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
  1162. < MAX_PENDING_REQS) &&
  1163. !list_empty(&netbk->net_schedule_list)) {
  1164. struct xenvif *vif;
  1165. struct xen_netif_tx_request txreq;
  1166. struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
  1167. struct page *page;
  1168. struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
  1169. u16 pending_idx;
  1170. RING_IDX idx;
  1171. int work_to_do;
  1172. unsigned int data_len;
  1173. pending_ring_idx_t index;
  1174. /* Get a netif from the list with work to do. */
  1175. vif = poll_net_schedule_list(netbk);
  1176. /* This can sometimes happen because the test of
  1177. * list_empty(net_schedule_list) at the top of the
  1178. * loop is unlocked. Just go back and have another
  1179. * look.
  1180. */
  1181. if (!vif)
  1182. continue;
  1183. if (vif->tx.sring->req_prod - vif->tx.req_cons >
  1184. XEN_NETIF_TX_RING_SIZE) {
  1185. netdev_err(vif->dev,
  1186. "Impossible number of requests. "
  1187. "req_prod %d, req_cons %d, size %ld\n",
  1188. vif->tx.sring->req_prod, vif->tx.req_cons,
  1189. XEN_NETIF_TX_RING_SIZE);
  1190. netbk_fatal_tx_err(vif);
  1191. continue;
  1192. }
  1193. RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
  1194. if (!work_to_do) {
  1195. xenvif_put(vif);
  1196. continue;
  1197. }
  1198. idx = vif->tx.req_cons;
  1199. rmb(); /* Ensure that we see the request before we copy it. */
  1200. memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
  1201. /* Credit-based scheduling. */
  1202. if (txreq.size > vif->remaining_credit &&
  1203. tx_credit_exceeded(vif, txreq.size)) {
  1204. xenvif_put(vif);
  1205. continue;
  1206. }
  1207. vif->remaining_credit -= txreq.size;
  1208. work_to_do--;
  1209. vif->tx.req_cons = ++idx;
  1210. memset(extras, 0, sizeof(extras));
  1211. if (txreq.flags & XEN_NETTXF_extra_info) {
  1212. work_to_do = xen_netbk_get_extras(vif, extras,
  1213. work_to_do);
  1214. idx = vif->tx.req_cons;
  1215. if (unlikely(work_to_do < 0))
  1216. continue;
  1217. }
  1218. ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
  1219. if (unlikely(ret < 0))
  1220. continue;
  1221. idx += ret;
  1222. if (unlikely(txreq.size < ETH_HLEN)) {
  1223. netdev_dbg(vif->dev,
  1224. "Bad packet size: %d\n", txreq.size);
  1225. netbk_tx_err(vif, &txreq, idx);
  1226. continue;
  1227. }
  1228. /* No crossing a page as the payload mustn't fragment. */
  1229. if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
  1230. netdev_err(vif->dev,
  1231. "txreq.offset: %x, size: %u, end: %lu\n",
  1232. txreq.offset, txreq.size,
  1233. (txreq.offset&~PAGE_MASK) + txreq.size);
  1234. netbk_fatal_tx_err(vif);
  1235. continue;
  1236. }
  1237. index = pending_index(netbk->pending_cons);
  1238. pending_idx = netbk->pending_ring[index];
  1239. data_len = (txreq.size > PKT_PROT_LEN &&
  1240. ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
  1241. PKT_PROT_LEN : txreq.size;
  1242. skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
  1243. GFP_ATOMIC | __GFP_NOWARN);
  1244. if (unlikely(skb == NULL)) {
  1245. netdev_dbg(vif->dev,
  1246. "Can't allocate a skb in start_xmit.\n");
  1247. netbk_tx_err(vif, &txreq, idx);
  1248. break;
  1249. }
  1250. /* Packets passed to netif_rx() must have some headroom. */
  1251. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  1252. if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
  1253. struct xen_netif_extra_info *gso;
  1254. gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
  1255. if (netbk_set_skb_gso(vif, skb, gso)) {
  1256. /* Failure in netbk_set_skb_gso is fatal. */
  1257. kfree_skb(skb);
  1258. continue;
  1259. }
  1260. }
  1261. /* XXX could copy straight to head */
  1262. page = xen_netbk_alloc_page(netbk, pending_idx);
  1263. if (!page) {
  1264. kfree_skb(skb);
  1265. netbk_tx_err(vif, &txreq, idx);
  1266. continue;
  1267. }
  1268. gop->source.u.ref = txreq.gref;
  1269. gop->source.domid = vif->domid;
  1270. gop->source.offset = txreq.offset;
  1271. gop->dest.u.gmfn = virt_to_mfn(page_address(page));
  1272. gop->dest.domid = DOMID_SELF;
  1273. gop->dest.offset = txreq.offset;
  1274. gop->len = txreq.size;
  1275. gop->flags = GNTCOPY_source_gref;
  1276. gop++;
  1277. memcpy(&netbk->pending_tx_info[pending_idx].req,
  1278. &txreq, sizeof(txreq));
  1279. netbk->pending_tx_info[pending_idx].vif = vif;
  1280. netbk->pending_tx_info[pending_idx].head = index;
  1281. *((u16 *)skb->data) = pending_idx;
  1282. __skb_put(skb, data_len);
  1283. skb_shinfo(skb)->nr_frags = ret;
  1284. if (data_len < txreq.size) {
  1285. skb_shinfo(skb)->nr_frags++;
  1286. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1287. pending_idx);
  1288. } else {
  1289. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1290. INVALID_PENDING_IDX);
  1291. }
  1292. netbk->pending_cons++;
  1293. request_gop = xen_netbk_get_requests(netbk, vif,
  1294. skb, txfrags, gop);
  1295. if (request_gop == NULL) {
  1296. kfree_skb(skb);
  1297. netbk_tx_err(vif, &txreq, idx);
  1298. continue;
  1299. }
  1300. gop = request_gop;
  1301. __skb_queue_tail(&netbk->tx_queue, skb);
  1302. vif->tx.req_cons = idx;
  1303. xen_netbk_check_rx_xenvif(vif);
  1304. if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
  1305. break;
  1306. }
  1307. return gop - netbk->tx_copy_ops;
  1308. }
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}

/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;

	gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);

	xen_netbk_tx_submit(netbk);
}
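
/*
 * Release the pending slot(s) backing a completed (or failed) request:
 * queue a tx response for each slot of the packet, return the slots to
 * the pending ring, and drop the reference on the page that was used
 * for the copy. 'status' is the XEN_NETIF_RSP_* value reported back to
 * the frontend.
 */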
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;
	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(netbk, head));
	BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = netbk->pending_ring[idx];

		pending_tx_info = &netbk->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = netbk->pending_ring[info_idx];

		xenvif_put(vif);

		peek = netbk->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(netbk, peek));

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}
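
/*
 * Write a single tx response onto the shared ring, consuming an extra
 * slot if the request carried extra info, and kick the frontend's irq
 * when the ring macros indicate a notification is needed.
 */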
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}
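
/*
 * Fill in the next rx response slot; a negative 'st' overrides the size
 * in the response's status field.
 */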
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
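
/*
 * Work predicates for the netback kthread: RX work exists while
 * guest-bound skbs sit on rx_queue; TX work exists while the pending
 * ring has room for another packet (nr_pending_reqs() plus
 * XEN_NETBK_LEGACY_SLOTS_MAX still below MAX_PENDING_REQS) and the
 * schedule list is not empty.
 */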
static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}
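
/*
 * Per-group netback kernel thread: sleep until there is RX or TX work
 * (or a stop request), then run the corresponding action.
 */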
static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
					 rx_work_todo(netbk) ||
					 tx_work_todo(netbk) ||
					 kthread_should_stop());
		cond_resched();

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}
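
/* Tear down the mappings of the frontend's TX and RX shared rings. */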
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}
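
/*
 * Map the frontend's TX and RX shared rings into the backend and
 * initialise the back-ring state; on failure, any ring already mapped
 * is torn down again via the err: path.
 */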
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}
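
/*
 * Module init: clamp fatal_skb_slots to at least
 * XEN_NETBK_LEGACY_SLOTS_MAX, allocate one xen_netbk group per online
 * CPU, start a CPU-bound kthread per group, and register the xenbus
 * backend.
 */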
static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		printk(KERN_INFO
		       "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
		       fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk)
		return -ENOMEM;

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;
}

module_init(netback_init);
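
/*
 * Module exit: unregister the xenbus backend, stop each group's timer
 * and kthread, free any pages still held for pending requests, and
 * free the group array.
 */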
static void __exit netback_fini(void)
{
	int i, j;

	xenvif_xenbus_fini();

	for (i = 0; i < xen_netbk_group_nr; i++) {
		struct xen_netbk *netbk = &xen_netbk[i];
		del_timer_sync(&netbk->net_timer);
		kthread_stop(netbk->task);
		for (j = 0; j < MAX_PENDING_REQS; j++) {
			/* Index with the inner loop variable j, not i. */
			if (netbk->mmap_pages[j])
				__free_page(netbk->mmap_pages[j]);
		}
	}

	vfree(xen_netbk);
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");