rx.c

/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page
 * recycle ring, this number is divided by the number of buffers per page to
 * calculate the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
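
/* Worked example (illustrative only; the real values come from
 * EFX_MAX_FRAME_LEN() and EFX_RX_USR_BUF_SIZE in the driver headers): with a
 * ~9 KB jumbo maximum frame length and a usable buffer size just under 2 KB,
 * EFX_RX_MAX_FRAGS comes out to 6, so EFX_RXD_HEAD_ROOM keeps 7 descriptors
 * of slack free in the ring.
 */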
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
        const u8 *data = eh + efx->rx_packet_hash_offset;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}
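
/* Worked example for the split above (illustrative; assumes 4 KiB pages,
 * an order-0 allocation, a 64-byte struct efx_rx_page_state and a
 * rx_page_buf_step of 1920 bytes):
 *
 *   rx_bufs_per_page   = (4096 - 64) / 1920 = 2
 *   rx_buffer_truesize = 4096 / 2 = 2048
 *   rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4
 */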
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct page *page;
        struct efx_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}
/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + NET_IP_ALIGN;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}
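
/* Illustrative layout of a page populated above (assuming two buffers per
 * page; offsets follow the code, the diagram itself is an addition):
 *
 *   +----------------------+  page_offset = 0
 *   | efx_rx_page_state    |  holds the page's DMA address for reuse
 *   +----------------------+
 *   | NET_IP_ALIGN pad     |
 *   | RX buffer 0 data     |
 *   +----------------------+  page_offset += rx_page_buf_step
 *   | NET_IP_ALIGN pad     |
 *   | RX buffer 1 data     |  flags = EFX_RX_BUF_LAST_IN_PAGE
 *   +----------------------+
 */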
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                put_page(rx_buf->page);
                rx_buf->page = NULL;
        }
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffer(rx_buf);
        }
        rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
                                 struct efx_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        do {
                efx_free_rx_buffer(rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}
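
/* Illustrative refill pass (hypothetical numbers, consistent with the
 * page-split example earlier): with a batch_size of 8 buffers, a max_fill of
 * 505 and a current fill_level of 400, space is 105, so the loop above posts
 * 13 batches (104 buffers) and then notifies the NIC once, rather than
 * ringing the doorbell per buffer.
 */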
void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(efx, eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}
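
/* Note on the GRO path above: all payload stays in page fragments
 * (skb->data_len == skb->len, so the linear area is empty), and
 * napi_gro_frags() pulls whatever headers it needs out of the first
 * fragment itself.  That is why no memcpy of headers is done here, unlike
 * in efx_rx_mk_skb() below.
 */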
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
        memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                if (!(flags & EFX_RX_PKT_PREFIX_LEN))
                        efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}
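
/* Pipelining note (behaviour implied by the code above): the packet recorded
 * in rx_pkt_n_frags/rx_pkt_index is not delivered here.  It is handed to
 * __efx_rx_packet() either by the next call's efx_rx_flush_packet() or at
 * the end of the NAPI poll, by which time the prefetch issued above has had
 * a chance to complete.
 */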
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* Read length from the prefix if necessary.  This already
         * excludes the length of the prefix itself.
         */
        if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
                rx_buf->len = le16_to_cpup((__le16 *)
                                           (eh + efx->rx_packet_len_offset));

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}
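
/* Sizing example (hypothetical numbers): rxq_entries = 1000 rounds up to
 * entries = 1024, so ptr_mask = 0x3ff, and ring indices are taken as
 * "count & ptr_mask" throughout this file.
 */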
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                     struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}
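
/* Recycle-ring sizing example (assuming two buffers per page, as in the
 * earlier example): with an IOMMU present, 4096 / 2 = 2048 pages (already a
 * power of two); without one, 16 / 2 = 8 pages.  A much larger ring is worth
 * having under an IOMMU because recycling a page avoids a comparatively
 * expensive dma_map_page() call.
 */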
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}
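
/* Threshold example (hypothetical numbers): with rxq_entries = 512 and
 * EFX_RXD_HEAD_ROOM = 7, max_fill = 505.  Loading the module with
 * rx_refill_threshold=50 gives trigger = 505 * 50 / 100 = 252, so the fast
 * fill only runs once the ring drains below half full; the default of 0
 * uses max_trigger, refilling as soon as one batch's worth of buffers can
 * be posted.
 */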
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring.  Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
        const struct iphdr *ip;
        const __be16 *ports;
        int nhoff;
        int rc;

        nhoff = skb_network_offset(skb);

        if (skb->protocol == htons(ETH_P_8021Q)) {
                EFX_BUG_ON_PARANOID(skb_headlen(skb) <
                                    nhoff + sizeof(struct vlan_hdr));
                if (((const struct vlan_hdr *)(skb->data + nhoff))->
                    h_vlan_encapsulated_proto != htons(ETH_P_IP))
                        return -EPROTONOSUPPORT;

                /* This is IP over 802.1q VLAN.  We can't filter on the
                 * IP 5-tuple and the vlan together, so just strip the
                 * vlan header and filter on the IP part.
                 */
                nhoff += sizeof(struct vlan_hdr);
        } else if (skb->protocol != htons(ETH_P_IP)) {
                return -EPROTONOSUPPORT;
        }

        /* RFS must validate the IP header length before calling us */
        EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
        ip = (const struct iphdr *)(skb->data + nhoff);
        if (ip_is_fragment(ip))
                return -EPROTONOSUPPORT;
        EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
        ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
                                      ip->daddr, ports[1], ip->saddr, ports[0]);
        if (rc)
                return rc;

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        efx->rps_flow_id[rc] = flow_id;
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;

        netif_info(efx, rx_status, efx->net_dev,
                   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
                   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
                   rxq_index, flow_id, rc);

        return rc;
}
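
/* efx_filter_rfs() is the driver's ndo_rx_flow_steer hook: the RFS core
 * calls it when a flow's consuming CPU changes, and the filter ID returned
 * here is the index later handed back to filter_rfs_expire_one() by the
 * expiry scan below.
 */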
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        unsigned int index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                flow_id = efx->rps_flow_id[index];
                if (expire_one(efx, flow_id, index))
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [flow %u]\n",
                                   index, flow_id);
                if (++index == size)
                        index = 0;
        }
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}
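
/* Expiry scanning note: __efx_filter_rfs_expire() walks the filter table as
 * a ring, checking at most @quota entries per call and remembering where it
 * stopped in rps_expire_index.  The trylock means a scan is simply skipped,
 * not blocked on, when the filter table is busy.
 */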
#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
        if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
            spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
            (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }

        return false;
}