/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}
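
/*
 * kref release callback for struct ipoib_ah.  It would not be safe to
 * destroy the ib_ah here, since sends posted with it may still be
 * outstanding; instead the AH is parked on the dead_ahs list and
 * __ipoib_reap_ah() destroys it once priv->tx_tail has passed
 * ah->last_send, i.e. once every send using it has completed.
 */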
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}
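
/*
 * UD receive buffer layout: when the IB MTU exceeds what fits in one
 * linear allocation (ipoib_ud_need_sg()), each receive is split across
 * two scatter entries -- a linear head of IPOIB_UD_HEAD_SIZE bytes in
 * mapping[0] plus one full page in mapping[1]; otherwise a single
 * linear buffer of IPOIB_UD_BUF_SIZE(mtu) is used.
 */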
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}
static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * There are only two buffers needed for max_payload = 4K;
		 * the first buf size is IPOIB_UD_HEAD_SIZE.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		frag->size     = size;
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);
}
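
/*
 * A single receive work request template (priv->rx_wr / priv->rx_sge)
 * is reused for every buffer: only the wr_id and the scatter addresses
 * are refreshed below before reposting.  (The lengths and the rest of
 * the template are presumably filled in once at QP setup time, outside
 * this file.)
 */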
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}
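
/*
 * Completion demultiplexing: receives are posted with IPOIB_OP_RECV
 * ORed into the wr_id (see ipoib_ib_post_receive() above), and the
 * connected-mode code additionally tags its completions with
 * IPOIB_OP_CM.  The poll loops below use these flag bits to route each
 * ib_wc to the right handler, and the handlers mask them back off to
 * recover the ring index.
 */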
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (dev->features & NETIF_F_LRO)
		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
	else
		netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}
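
/*
 * TX DMA mapping convention used by the two helpers below: when the
 * skb has linear data its mapping occupies mapping[0] and the page
 * fragments follow from mapping[1]; for a purely paged skb the
 * fragments start at mapping[0].  The 'off' variable encodes which of
 * the two layouts is in effect.
 */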
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca, frag->page,
						   frag->page_offset, frag->size,
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}
static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
				  DMA_TO_DEVICE);
	}
}
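
/*
 * TX completion handling.  Note the flow-control hysteresis: ipoib_send()
 * stops the net queue when tx_outstanding reaches ipoib_sendq_size, but
 * the queue is only woken here once completions drain the ring back to
 * half full, which avoids rapid stop/wake thrashing at the boundary.
 */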
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}
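
/*
 * NAPI poll routine for the receive CQ (connected-mode TX completions
 * land here as well, since they share this CQ).  When the CQ empties
 * before the budget is exhausted we complete NAPI and rearm the CQ;
 * IB_CQ_REPORT_MISSED_EVENTS makes ib_req_notify_cq() return nonzero
 * if completions arrived while rearming, in which case we reschedule
 * and keep polling rather than risk a lost event.
 */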
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		if (dev->features & NETIF_F_LRO)
			lro_flush_all(&priv->lro.lro_mgr);

		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}
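
/*
 * For GSO skbs the caller hands the TCP/IP headers to the HCA
 * separately (head/hlen) and posts an IB_WR_LSO work request so the
 * hardware performs the segmentation; everything else goes out as a
 * plain IB_WR_SEND.
 */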
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr   = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr   = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah	     = address;

	if (head) {
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *) ctx);
}
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}
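
/*
 * Teardown sequence used below: move the QP to the error state so the
 * HCA flushes every outstanding work request, drain completions until
 * the TX and RX rings empty (giving up after 5 seconds and reclaiming
 * the buffers by hand if the hardware appears wedged), then reset the
 * QP and reap the remaining address handles.
 */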
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}
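
/*
 * Flush levels, as implemented below: LIGHT invalidates cached paths
 * and flushes multicast state; NORMAL additionally takes the IB side
 * of the device down and (if still administratively up) back up;
 * HEAVY does a full stop/open cycle, first re-reading the P_Key index
 * and restarting the QP only if that index actually changed.
 */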
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);

		ipoib_mcast_restart_task(&priv->restart_task);
	}
}
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/*
	 * Look for the interface pkey value in the IB Port P_Key table
	 * and set the interface pkey assignment flag.
	 */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}