/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

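/*
 * Allocate the software state for one TX ring: the per-descriptor
 * tx_info array, a bounce buffer for descriptors that wrap past the
 * end of the ring, the HW work-queue buffer itself, and the QP that
 * is used to post send descriptors on this ring.
 */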
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		mlx4_err(mdev, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		mlx4_err(mdev, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		mlx4_err(mdev, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}

	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

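/*
 * Reset the ring indexes, program the QP context for this ring (its
 * CQ, SRQ and doorbell QPN) and move the QP from RESET towards the
 * ready-to-send state so posted descriptors will be executed by HW.
 */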
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int srqn)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, srqn, &ring->context);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

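/*
 * Release one TX descriptor: unmap the DMA mappings of the linear
 * part and the page fragments, stamp every TXBB of the descriptor
 * with the current SW ownership bit, and free the skb.  The slower
 * branch handles descriptors that wrap past the end of the ring
 * buffer.  Returns the number of TXBBs released.
 */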
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = (struct mlx4_wqe_data_seg *)
					(ring->buf + ((void *) data - end));
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = (struct mlx4_wqe_data_seg *) ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}

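/*
 * Drop every descriptor that is still outstanding on the ring, e.g.
 * when the interface is being brought down.  Walks from the consumer
 * index up to the producer index, freeing each descriptor in turn,
 * and returns how many descriptors were reclaimed.
 */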
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

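/*
 * Build the priority-to-ring map: the 8 VLAN priorities are spread
 * over the TX rings in roughly equal blocks (the remainder of
 * 8 / ring_num gives some rings one extra priority).  With a single
 * ring all priorities map to ring 0.
 */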
void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
{
	int block = 8 / ring_num;
	int extra = 8 - (block * ring_num);
	int num = 0;
	u16 ring = 1;
	int prio;

	if (ring_num == 1) {
		for (prio = 0; prio < 8; prio++)
			prio_map[prio] = 0;
		return;
	}

	for (prio = 0; prio < 8; prio++) {
		if (extra && (num == block + 1)) {
			ring++;
			num = 0;
			extra--;
		} else if (!extra && (num == block)) {
			ring++;
			num = 0;
		}
		prio_map[prio] = ring;
		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
		num++;
	}
}

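/*
 * TX completion processing: walk the CQ, free every descriptor the HW
 * has reported complete, update the CQ consumer index and then the
 * ring consumer index, and wake the netdev queue if this ring had
 * previously stopped it.
 */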
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until it is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			/* TODO: support multiqueue netdevs. Currently, we block
			 * when *any* ring is full. Note that:
			 * - 2 Tx rings can unblock at the same time and call
			 *   netif_wake_queue(), which is OK since this
			 *   operation is idempotent.
			 * - We might wake the queue just after another ring
			 *   stopped it. This is no big deal because the next
			 *   transmission on that ring would stop the queue.
			 */
			ring->blocked = 0;
			netif_wake_queue(dev);
			priv->port_stats.wake_queue++;
		}
	}
}

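/*
 * Completions are reaped from two paths that share ring->comp_lock:
 * the CQ interrupt handler below, and the per-CQ timer
 * (mlx4_en_poll_tx_cq), which is re-armed while packets are still in
 * flight so completions get processed even if no further interrupt
 * or transmit arrives.
 */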
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

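/*
 * Descriptors that would wrap past the end of the ring are first built
 * in ring->bounce_buf and copied back into place here.  The wrapped
 * tail is copied first, then the head; both are written backwards, and
 * the first dword (the ownership/opcode word of the control segment)
 * is skipped so the caller can set it last, after a memory barrier.
 */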
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						       struct mlx4_en_tx_ring *ring,
						       u32 index,
						       unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static void *get_frag_ptr(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	struct page *page = frag->page;
	void *ptr;

	ptr = page_address(page);
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

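/*
 * A packet is sent inline (copied straight into the descriptor instead
 * of being DMA-mapped) when it is not GSO, fits under inline_thold,
 * and consists of at most the linear part plus a single page fragment
 * whose page is addressable via page_address().
 */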
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = get_frag_ptr(skb);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

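/*
 * Compute the descriptor size in bytes for this skb: for LSO packets a
 * control segment plus the LSO header segment plus one data segment
 * per fragment (and one more for the linear payload if the headers do
 * not cover all of it); for ordinary packets either one data segment
 * per fragment plus one for the linear part, or the inline size when
 * the packet will be copied inline.  Returns 0 and drops the skb on
 * error.
 */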
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					mlx4_warn(mdev, "Non-linear headers\n");
				dev_kfree_skb_any(skb);
				return 0;
			}
		}
		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
			if (netif_msg_tx_err(priv))
				mlx4_warn(mdev, "LSO header size too big\n");
			dev_kfree_skb_any(skb);
			return 0;
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

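/*
 * Copy the packet contents directly into the descriptor as one or two
 * inline segments.  When the data does not fit into a single inline
 * segment it is split in two, and the byte_count of the second segment
 * is written only after a barrier, so HW never sees a valid count
 * before the payload itself is in memory.
 */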
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
				       fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
				       fragptr, skb_shinfo(skb)->frags[0].size);
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
			 u16 *vlan_tag)
{
	int tx_ind;

	/* Obtain VLAN information if present */
	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		*vlan_tag = vlan_tx_tag_get(skb);
		/* Set the Tx ring to use according to vlan priority */
		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
	} else {
		*vlan_tag = 0;
		tx_ind = 0;
	}
	return tx_ind;
}

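/*
 * Main transmit entry point: compute the descriptor size, pick a ring
 * from the VLAN priority, stop the queue if the ring cannot hold
 * another maximum-size descriptor, then build the descriptor (LSO,
 * inline, or gather), and hand it to HW by writing the ownership word
 * and ringing the doorbell.
 */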
int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index;
	__be32 op_own;
	u16 vlan_tag;
	int i;
	int lso_header_size;
	void *fragptr;

	if (unlikely(!skb->len)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		return NETDEV_TX_OK;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			mlx4_warn(mdev, "Oversized header or SG list\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
	ring = &priv->tx_ring[tx_ind];

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue.
		 * TODO: implement multi-queue support (per-queue stop) */
		netif_stop_queue(dev);
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Now that we know what Tx ring to use */
	if (unlikely(!priv->port_up)) {
		if (netif_msg_tx_err(priv))
			mlx4_warn(mdev, "xmit: port down!\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode+ownership, which depends on
	 * whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
						MLX4_WQE_CTRL_SOLICITED);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(frag->size);
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	/* Ensure new descriptor hits memory
	 * before setting ownership of this descriptor to HW */
	wmb();
	tx_desc->ctrl.owner_opcode = op_own;

	/* Ring doorbell! */
	wmb();
	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
	dev->trans_start = jiffies;

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return 0;
}