/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

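/*
 * To reap an RX QP we post ipoib_cm_rx_drain_wr on it after moving it
 * to the error state: the send immediately completes with a "flush
 * error" WC whose wr_id is IPOIB_CM_RX_DRAIN_WRID, which tells the CQ
 * handler that all receives previously posted on that QP have been
 * flushed (see ipoib_cm_start_rx_drain() and ipoib_cm_handle_rx_wc()).
 */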
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

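/*
 * Repost receive buffer 'id' to the shared receive queue.  On failure
 * the buffer is unmapped and freed, since it can never complete.  The
 * non-SRQ variant below does the same for a per-connection RX ring.
 */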
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

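/*
 * Allocate an skb for receive ring slot 'id': a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus 'frags' full-page fragments, all
 * DMA-mapped for the device.  Returns the skb, or NULL on failure
 * with any partial mappings undone.
 */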
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG],
					     gfp_t gfp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(gfp);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	vfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state.  This way, a
	 * "flush error" WC will be immediately generated for each WR
	 * we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

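/*
 * Walk the passive-side QP through INIT -> RTR -> RTS using the
 * attributes the CM computes for each transition.  rq_psn is set to
 * the PSN we will advertise as starting_psn in our REP.
 */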
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->mr->lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next    = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}

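/*
 * Set up the per-connection receive ring used when the HCA has no
 * SRQ.  The connection is rejected with IB_CM_REJ_NO_QP once
 * ipoib_max_conn_qp such QPs already exist on the interface.
 */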
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping,
					   GFP_KERNEL)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;

	return ib_send_cm_rep(cm_id, &rep);
}

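/*
 * Passive-side handling of an incoming connection request: create
 * and bring up the RX QP, advertise our local QPN and buffer size in
 * the REP private data, and put the new connection on the LRU list
 * that ipoib_cm_stale_task() later scans.
 */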
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = prandom_u32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
					   0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

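/*
 * Receive completion handler for connected mode.  Drain WR
 * completions and flush errors are filtered out first; small packets
 * (under IPOIB_CM_COPYBREAK) are copied into a freshly allocated skb
 * so the large ring buffer can be reposted as-is, while larger
 * packets swap a new buffer into the ring and hand the old one up
 * the stack.
 */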
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
				       mapping, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr = addr;
	priv->tx_sge[0].length = len;

	priv->tx_wr.num_sge = 1;
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

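/*
 * Transmit one skb on a connected-mode QP.  The packet must fit in
 * the negotiated connection MTU; oversized packets are dropped and
 * reported back through ipoib_cm_skb_too_long() so the path MTU can
 * be updated.
 */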
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;
	int rc;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
		       addr, skb->len);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			rc = ib_req_notify_cq(priv->send_cq,
					      IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
			if (rc < 0)
				ipoib_warn(priv, "request notify on send CQ failed\n");
			else if (rc)
				ipoib_send_comp_handler(priv->send_cq, dev);
		}
	}
}

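/*
 * Send completion handler: unmap and free the skb, advance the ring
 * tail, and restart the netdev queue once the ring drains to half
 * full.  Any error other than a flush tears the connection down and
 * queues it for reaping.
 */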
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

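/*
 * Tear down the passive side: stop listening, push every live RX QP
 * into the error state, then wait (up to 5 seconds) for the drain
 * protocol above to flush all posted receives before freeing the
 * connections.
 */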
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

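/*
 * Active-side handling of the peer's REP: validate the advertised
 * MTU, move our TX QP to RTR and then RTS, flush any packets that
 * were queued on the neighbour while the connection was being set
 * up, and confirm with an RTU.
 */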
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

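/*
 * Destroy an active-side connection: destroy the CM ID first so no
 * further CM events arrive, wait up to 5 seconds for in-flight sends
 * to complete, then reclaim whatever is still on the TX ring.
 */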
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		netif_tx_lock_bh(p->dev);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	unsigned long flags;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->daddr + 4);
		tx->neigh = NULL;
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->daddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				ipoib_neigh_free(neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

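/*
 * Periodic garbage collection of passive connections: the
 * passive_ids list is kept in LRU order, so we walk from the tail
 * and move anything idle for longer than IPOIB_CM_RX_TIMEOUT into
 * the error state for draining and reaping.
 */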
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	int ret;

	if (!rtnl_trylock())
		return restart_syscall();

	ret = ipoib_set_mode(dev, buf);

	rtnl_unlock();

	if (!ret)
		return count;

	return ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}
}

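/*
 * One-time connected-mode setup for an interface: initialise the
 * bookkeeping lists and work items, create the SRQ if the HCA
 * supports one (sizing max_cm_mtu from the number of SRQ SGEs it
 * allows), and pre-post the shared receive ring.
 */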
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping,
						   GFP_KERNEL)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}