/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

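/*
 * MAX_INLINE caps inline_thold so that a fully inlined packet still fits
 * in two TXBBs: 128 bytes minus the 16-byte ctrl segment and two 4-byte
 * inline segment headers (the "128 - 16 - 4 - 4" below). This derivation
 * assumes TXBB_SIZE == 64, CTRL_SIZE == 16 and a 4-byte
 * mlx4_wqe_inline_seg, as defined in mlx4_en.h.
 */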
enum {
        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring *ring, u32 size,
                           u16 stride)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int tmp;
        int err;

        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;

        inline_thold = min(inline_thold, MAX_INLINE);

        spin_lock_init(&ring->comp_lock);

        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = vmalloc(tmp);
        if (!ring->tx_info) {
                en_err(priv, "Failed allocating tx_info ring\n");
                return -ENOMEM;
        }
        en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
               ring->tx_info, tmp);

        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
        if (!ring->bounce_buf) {
                en_err(priv, "Failed allocating bounce buffer\n");
                err = -ENOMEM;
                goto err_tx;
        }
        ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                 2 * PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed allocating hwq resources\n");
                goto err_bounce;
        }

        err = mlx4_en_map_buffer(&ring->wqres.buf);
        if (err) {
                en_err(priv, "Failed to map TX buffer\n");
                goto err_hwq_res;
        }

        ring->buf = ring->wqres.buf.direct.buf;

        en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
               "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
               ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

        err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
        if (err) {
                en_err(priv, "Failed reserving qp for tx ring.\n");
                goto err_map;
        }

        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
        if (err) {
                en_err(priv, "Failed allocating qp %d\n", ring->qpn);
                goto err_reserve;
        }
        ring->qp.event = mlx4_en_sqp_event;

        return 0;

err_reserve:
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
        mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
err_tx:
        vfree(ring->tx_info);
        ring->tx_info = NULL;
        return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
        vfree(ring->tx_info);
        ring->tx_info = NULL;
}

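/*
 * Reset the ring indices and bring the ring's QP up to the send-ready
 * state (mlx4_qp_to_ready() walks it through RST -> INIT -> RTR -> RTS).
 * cons starts at 0xffffffff with last_nr_txbb == 1 so that the first
 * "skip last polled descriptor" step wraps cons around to index 0.
 */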
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
                             int cq, int srqn)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        ring->cqn = cq;
        ring->prod = 0;
        ring->cons = 0xffffffff;
        ring->last_nr_txbb = 1;
        ring->poll_cnt = 0;
        ring->blocked = 0;
        memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
        memset(ring->buf, 0, ring->buf_size);

        ring->qp_state = MLX4_QP_STATE_RST;
        ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
                                ring->cqn, srqn, &ring->context);

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);

        return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

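/*
 * Release one completed descriptor: unmap its DMA buffers (unless the
 * data was sent inline), stamp the freed TXBBs by rewriting the first
 * dword of each STAMP_STRIDE chunk with STAMP_VAL plus the current SW
 * ownership bit, and free the skb. Both the data segments and the
 * stamping may wrap past the end of the ring buffer; the wrap branch
 * flips the stamped ownership bit for chunks that land back at the
 * start of the ring, since those belong to the next wrap cycle.
 * Returns the number of TXBBs the descriptor occupied.
 */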
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
                                int index, u8 owner)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
        struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
        struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
        struct sk_buff *skb = tx_info->skb;
        struct skb_frag_struct *frag;
        void *end = ring->buf + ring->buf_size;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;
        __be32 *ptr = (__be32 *)tx_desc;
        __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

        /* Optimize the common case when there are no wraparounds */
        if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
                if (!tx_info->inl) {
                        if (tx_info->linear) {
                                pci_unmap_single(mdev->pdev,
                                        (dma_addr_t) be64_to_cpu(data->addr),
                                        be32_to_cpu(data->byte_count),
                                        PCI_DMA_TODEVICE);
                                ++data;
                        }

                        for (i = 0; i < frags; i++) {
                                frag = &skb_shinfo(skb)->frags[i];
                                pci_unmap_page(mdev->pdev,
                                        (dma_addr_t) be64_to_cpu(data[i].addr),
                                        frag->size, PCI_DMA_TODEVICE);
                        }
                }
                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += STAMP_DWORDS;
                }

        } else {
                if (!tx_info->inl) {
                        if ((void *) data >= end) {
                                data = (struct mlx4_wqe_data_seg *)
                                                (ring->buf + ((void *) data - end));
                        }

                        if (tx_info->linear) {
                                pci_unmap_single(mdev->pdev,
                                        (dma_addr_t) be64_to_cpu(data->addr),
                                        be32_to_cpu(data->byte_count),
                                        PCI_DMA_TODEVICE);
                                ++data;
                        }

                        for (i = 0; i < frags; i++) {
                                /* Check for wraparound before unmapping */
                                if ((void *) data >= end)
                                        data = (struct mlx4_wqe_data_seg *) ring->buf;
                                frag = &skb_shinfo(skb)->frags[i];
                                pci_unmap_page(mdev->pdev,
                                        (dma_addr_t) be64_to_cpu(data->addr),
                                        frag->size, PCI_DMA_TODEVICE);
                                ++data;
                        }
                }
                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += STAMP_DWORDS;
                        if ((void *) ptr >= end) {
                                ptr = ring->buf;
                                stamp ^= cpu_to_be32(0x80000000);
                        }
                }
        }
        dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
}

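/*
 * Free every descriptor still pending on the ring (used when the port
 * is being brought down). Returns the number of descriptors that had
 * not completed.
 */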
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
               ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                                                ring->cons & ring->size_mask,
                                                !!(ring->cons & ring->size));
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
}

static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe = cq->buf;
        u16 index;
        u16 new_index;
        u32 txbbs_skipped = 0;
        u32 cq_last_sav;

        /* index always points to the first TXBB of the last polled descriptor */
        index = ring->cons & ring->size_mask;
        new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        if (index == new_index)
                return;

        if (!priv->port_up)
                return;

        /*
         * We use a two-stage loop:
         * - the first samples the HW-updated CQE
         * - the second frees TXBBs until the last sample
         * This lets us amortize CQE cache misses, while still polling the CQ
         * until it is quiescent.
         */
        cq_last_sav = mcq->cons_index;
        do {
                do {
                        /* Skip over last polled CQE */
                        index = (index + ring->last_nr_txbb) & ring->size_mask;
                        txbbs_skipped += ring->last_nr_txbb;

                        /* Poll next CQE */
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
                                                priv, ring, index,
                                                !!((ring->cons + txbbs_skipped) &
                                                   ring->size));
                        ++mcq->cons_index;

                } while (index != new_index);

                new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        } while (index != new_index);
        AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
                         (u32) (mcq->cons_index - cq_last_sav));

        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;

        /* Wakeup Tx queue if this ring stopped it */
        if (unlikely(ring->blocked)) {
                if ((u32) (ring->prod - ring->cons) <=
                     ring->size - HEADROOM - MAX_DESC_TXBBS) {
                        ring->blocked = 0;
                        netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
                        priv->port_stats.wake_queue++;
                }
        }
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

        if (!spin_trylock(&ring->comp_lock))
                return;
        mlx4_en_process_tx_cq(cq->dev, cq);
        mod_timer(&cq->timer, jiffies + 1);
        spin_unlock(&ring->comp_lock);
}

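/*
 * Timer callback (old-style timer API: the cq pointer arrives cast
 * through an unsigned long). Acts as a safety net that reaps
 * completions when neither an interrupt nor the xmit-path poll has
 * done so, rearming itself while packets remain in flight.
 */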
void mlx4_en_poll_tx_cq(unsigned long data)
{
        struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        u32 inflight;

        INC_PERF_COUNTER(priv->pstats.tx_poll);

        if (!spin_trylock_irq(&ring->comp_lock)) {
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
                return;
        }
        mlx4_en_process_tx_cq(cq->dev, cq);
        inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

        /* If there are still packets in flight and the timer has not already
         * been scheduled by the Tx routine then schedule it here to guarantee
         * completion processing of these packets */
        if (inflight && priv->port_up)
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        spin_unlock_irq(&ring->comp_lock);
}

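/*
 * A descriptor that would wrap past the end of the ring is first built
 * in ring->bounce_buf and then copied back here in two chunks: the part
 * that wraps onto the start of the ring, then the part at the end. Both
 * copies run in descending dword order with a barrier at each TXBB
 * boundary, so a TXBB's first dword (the one carrying the ownership
 * bit) is written only after the rest of that TXBB. The descriptor's
 * very first dword (ctrl.owner_opcode) is skipped; the caller writes it
 * last, after its own wmb().
 */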
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      u32 index,
                                                      unsigned int desc_size)
{
        u32 copy = (ring->size - index) * TXBB_SIZE;
        int i;

        for (i = desc_size - copy - 4; i >= 0; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *) (ring->buf + i)) =
                        *((u32 *) (ring->bounce_buf + copy + i));
        }

        for (i = copy - 4; i >= 4 ; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
                        *((u32 *) (ring->bounce_buf + i));
        }

        /* Return real descriptor location */
        return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
        struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
        if (!timer_pending(&cq->timer))
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
                if (spin_trylock_irq(&ring->comp_lock)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
                        spin_unlock_irq(&ring->comp_lock);
                }
}

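/*
 * Kernel virtual address of the first page fragment, used when copying
 * fragment data inline into the descriptor. page_address() returns NULL
 * for pages without a permanent mapping (e.g. highmem), in which case
 * the packet is not inlined.
 */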
static void *get_frag_ptr(struct sk_buff *skb)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        struct page *page = frag->page;
        void *ptr;

        ptr = page_address(page);
        if (unlikely(!ptr))
                return NULL;

        return ptr + frag->page_offset;
}

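/*
 * Decide whether the packet should be copied inline into the WQE rather
 * than scatter/gathered by DMA: it must fit under inline_thold, must not
 * be GSO, and may use at most one page fragment (whose address is
 * returned through *pfrag for the later copy).
 */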
static int is_inline(struct sk_buff *skb, void **pfrag)
{
        void *ptr;

        if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
                if (skb_shinfo(skb)->nr_frags == 1) {
                        ptr = get_frag_ptr(skb);
                        if (unlikely(!ptr))
                                return 0;

                        if (pfrag)
                                *pfrag = ptr;
                        return 1;
                } else if (unlikely(skb_shinfo(skb)->nr_frags))
                        return 0;
                else
                        return 1;
        }

        return 0;
}

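/*
 * Size of a fully inlined WQE, padded to 16 bytes. Inline data that
 * crosses the first MLX4_INLINE_ALIGN chunk needs a second inline
 * segment header, hence the second branch.
 */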
static int inline_size(struct sk_buff *skb)
{
        if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
            <= MLX4_INLINE_ALIGN)
                return ALIGN(skb->len + CTRL_SIZE +
                             sizeof(struct mlx4_wqe_inline_seg), 16);
        else
                return ALIGN(skb->len + CTRL_SIZE + 2 *
                             sizeof(struct mlx4_wqe_inline_seg), 16);
}

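/*
 * Byte size of the WQE needed for this skb: a ctrl segment plus one data
 * segment per fragment (plus one for the linear part), or the inline
 * size for small packets. For LSO the headers are embedded in the WQE
 * itself, so they must live entirely in the linear part of the skb;
 * returns 0 to signal a drop when they don't.
 */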
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
                         int *lso_header_size)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int real_size;

        if (skb_is_gso(skb)) {
                *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
                real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
                        ALIGN(*lso_header_size + 4, DS_SIZE);
                if (unlikely(*lso_header_size != skb_headlen(skb))) {
                        /* We add a segment for the skb linear buffer only if
                         * it contains data */
                        if (*lso_header_size < skb_headlen(skb))
                                real_size += DS_SIZE;
                        else {
                                if (netif_msg_tx_err(priv))
                                        en_warn(priv, "Non-linear headers\n");
                                return 0;
                        }
                }
        } else {
                *lso_header_size = 0;
                if (!is_inline(skb, NULL))
                        real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
                else
                        real_size = inline_size(skb);
        }

        return real_size;
}

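/*
 * Copy the whole packet into the WQE's inline segment(s). Bit 31 of
 * byte_count marks a segment as inline; when the data spans two
 * segments, the second segment's byte_count is written only after a
 * wmb() so the copied data is visible before the segment is declared
 * valid.
 */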
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
                             int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
        struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
        int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

        if (skb->len <= spc) {
                inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
                skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                if (skb_shinfo(skb)->nr_frags)
                        memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
                               skb_shinfo(skb)->frags[0].size);

        } else {
                inl->byte_count = cpu_to_be32(1 << 31 | spc);
                if (skb_headlen(skb) <= spc) {
                        skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                        if (skb_headlen(skb) < spc) {
                                memcpy(((void *)(inl + 1)) + skb_headlen(skb),
                                       fragptr, spc - skb_headlen(skb));
                                fragptr += spc - skb_headlen(skb);
                        }
                        inl = (void *) (inl + 1) + spc;
                        memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
                } else {
                        skb_copy_from_linear_data(skb, inl + 1, spc);
                        inl = (void *) (inl + 1) + spc;
                        skb_copy_from_linear_data_offset(skb, spc, inl + 1,
                                        skb_headlen(skb) - spc);
                        if (skb_shinfo(skb)->nr_frags)
                                memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
                                       fragptr, skb_shinfo(skb)->frags[0].size);
                }

                wmb();
                inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
        }
        tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 vlan_tag = 0;

        /* If we support per priority flow control and the packet contains
         * a vlan tag, send the packet to the TX ring assigned to that priority
         */
        if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
        }

        return skb_tx_hash(dev, skb);
}

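/*
 * Hard-start-xmit: build one WQE for the skb (inline, LSO, or
 * scatter/gather), publish it by writing owner_opcode last, ring the
 * doorbell, and opportunistically poll the CQ. Returns NETDEV_TX_BUSY
 * with the queue stopped when fewer than HEADROOM + MAX_DESC_TXBBS
 * TXBBs remain free.
 */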
int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_wqe_data_seg *data;
        struct skb_frag_struct *frag;
        struct mlx4_en_tx_info *tx_info;
        int tx_ind = 0;
        int nr_txbb;
        int desc_size;
        int real_size;
        dma_addr_t dma;
        u32 index;
        __be32 op_own;
        u16 vlan_tag = 0;
        int i;
        int lso_header_size;
        void *fragptr;

        real_size = get_real_size(skb, dev, &lso_header_size);
        if (unlikely(!real_size))
                goto tx_drop;

        /* Align descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
        nr_txbb = desc_size / TXBB_SIZE;
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Oversized header or SG list\n");
                goto tx_drop;
        }

        tx_ind = skb->queue_mapping;
        ring = &priv->tx_ring[tx_ind];
        if (priv->vlgrp && vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);

        /* Check available TXBBs and 2K spare for prefetch */
        if (unlikely(((int)(ring->prod - ring->cons)) >
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue */
                netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
                ring->blocked = 1;
                priv->port_stats.queue_stopped++;

                /* Use interrupts to find out when queue opened */
                cq = &priv->tx_cq[tx_ind];
                mlx4_en_arm_cq(priv, cq);
                return NETDEV_TX_BUSY;
        }

        /* Track current inflight packets for performance analysis */
        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
                         (u32) (ring->prod - ring->cons - 1));

        /* Packet is good - grab an index and transmit it */
        index = ring->prod & ring->size_mask;

        /* See if we have enough space for a whole descriptor, plus a TXBB for
         * setting SW ownership on the next descriptor; if not, use a bounce
         * buffer. */
        if (likely(index + nr_txbb <= ring->size))
                tx_desc = ring->buf + index * TXBB_SIZE;
        else
                tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

        /* Save skb in tx_info ring */
        tx_info = &ring->tx_info[index];
        tx_info->skb = skb;
        tx_info->nr_txbb = nr_txbb;

        /* Prepare ctrl segment apart from opcode+ownership, which depends on
         * whether LSO is used */
        tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
        tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                                                MLX4_WQE_CTRL_SOLICITED);
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                                                         MLX4_WQE_CTRL_TCP_UDP_CSUM);
                priv->port_stats.tx_chksum_offload++;
        }

        /* Handle LSO (TSO) packets */
        if (lso_header_size) {
                /* Mark opcode as LSO */
                op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
                        ((ring->prod & ring->size) ?
                                cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

                /* Fill in the LSO prefix */
                tx_desc->lso.mss_hdr_size = cpu_to_be32(
                        skb_shinfo(skb)->gso_size << 16 | lso_header_size);

                /* Copy headers;
                 * note that we already verified that it is linear */
                memcpy(tx_desc->lso.header, skb->data, lso_header_size);
                data = ((void *) &tx_desc->lso +
                        ALIGN(lso_header_size + 4, DS_SIZE));

                priv->port_stats.tso_packets++;
                i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
                        !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
                ring->bytes += skb->len + (i - 1) * lso_header_size;
                ring->packets += i;
        } else {
                /* Normal (Non LSO) packet */
                op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
                        ((ring->prod & ring->size) ?
                                cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                data = &tx_desc->data;
                ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
                ring->packets++;
        }
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

        /* valid only for non-inline segments */
        tx_info->data_offset = (void *) data - (void *) tx_desc;

        tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
        data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

        if (!is_inline(skb, &fragptr)) {
                /* Map fragments */
                for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
                        frag = &skb_shinfo(skb)->frags[i];
                        dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
                                           frag->size, PCI_DMA_TODEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        wmb();
                        data->byte_count = cpu_to_be32(frag->size);
                        --data;
                }

                /* Map linear part */
                if (tx_info->linear) {
                        dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
                                             skb_headlen(skb) - lso_header_size,
                                             PCI_DMA_TODEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        wmb();
                        data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
                }
                tx_info->inl = 0;
        } else {
                build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
                tx_info->inl = 1;
        }

        ring->prod += nr_txbb;

        /* If we used a bounce buffer then copy descriptor back into place */
        if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
                tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

        /* Run destructor before passing skb to HW */
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);

        /* Ensure new descriptor hits memory
         * before setting ownership of this descriptor to HW */
        wmb();
        tx_desc->ctrl.owner_opcode = op_own;

        /* Ring doorbell! */
        wmb();
        writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);

        /* Poll CQ here */
        mlx4_en_xmit_poll(priv, tx_ind);

        return NETDEV_TX_OK;

tx_drop:
        dev_kfree_skb_any(skb);
        priv->stats.tx_dropped++;
        return NETDEV_TX_OK;
}