rx.c

/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below (a worked example follows their
 * definitions).
 *
 *   - Pushing and popping descriptors are separated by the rx_queue
 *     size, so the watermarks should be ~rxd_size.
 *   - The performance win by using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation of non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
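
/*
 * Worked example of the hysteresis (illustrative packet counts only):
 * starting from rx_alloc_level = 0, each packet the GRO engine consumes
 * (gro_result other than GRO_NORMAL or GRO_DROP) adds RX_ALLOC_FACTOR_GRO
 * (+1), so roughly 0x2000 (8192) such packets are needed before the level
 * exceeds RX_ALLOC_LEVEL_GRO and the channel switches to page-based
 * allocation.  Each packet delivered without GRO merging adds
 * RX_ALLOC_FACTOR_SKB (-2), pulling the level back towards zero twice as
 * fast; efx_rx_strategy() clamps the level to 0..RX_ALLOC_LEVEL_MAX.
 */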

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
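
/*
 * For example, assuming an RX ring of 512 entries (the real size comes from
 * efx->rxq_entries), efx_init_rx_queue() derives:
 *
 *   max_fill          = 512 - EFX_RXD_HEAD_ROOM       = 510
 *   fast_fill_trigger = 510 * rx_refill_threshold/100 = 459
 *   fast_fill_limit   = 510 * rx_refill_limit/100     = 484
 *
 * so refilling starts once the queue drops below ~90% full and tops the
 * queue back up towards ~95% full, EFX_RX_BATCH descriptors at a time.
 */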

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->is_page)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return ((u8 *)buf->u.skb->data +
			efx->type->rx_buffer_hash_size);
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return ((u32)data[0] |
		(u32)data[1] << 8 |
		(u32)data[2] << 16 |
		(u32)data[3] << 24);
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
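
/*
 * Example of the split path above, assuming 4 KiB pages and a standard
 * 1500-byte MTU (so efx->rx_buffer_len <= EFX_RX_HALF_PAGE): the struct
 * efx_rx_page_state sits at the start of the page, buffer 0 starts at
 * sizeof(struct efx_rx_page_state) + EFX_PAGE_IP_ALIGN, and buffer 1 starts
 * PAGE_SIZE/2 bytes further on.  state->refcnt counts the buffers that have
 * been created on the page and not yet unmapped, so the page's DMA mapping
 * is torn down only when the last such buffer is unmapped (see
 * efx_unmap_rx_buffer() below).
 */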

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->is_page = true;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->fast_fill_limit,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !rx_buf->is_page;
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through the generic GRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate GRO method
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh, bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the GRO engine */
	if (rx_buf->is_page) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->u.skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (checksummed ? " [SUMMED]" : ""),
		   (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel,
				channel->rx_pkt, channel->rx_pkt_csummed);
	channel->rx_pkt = rx_buf;
	channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!rx_buf->is_page) {
		skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		checksummed = false;

	if (likely(checksummed || rx_buf->is_page)) {
		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, limit;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers NB start at index 0 not current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");