ipoib_cm.c

/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
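
/*
 * Passive (RX) connections are kept on an LRU list: a connection's
 * timestamp is refreshed at most once per IPOIB_CM_RX_UPDATE_TIME, and only
 * for the roughly one-in-four completions whose low wr_id bits
 * (IPOIB_CM_RX_UPDATE_MASK) are zero, to keep the cost down.  The stale
 * task re-arms itself every IPOIB_CM_RX_DELAY and tears down connections
 * that have been idle for longer than IPOIB_CM_RX_TIMEOUT.
 */
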
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};
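
/*
 * RX QP drain trick: once passive QPs have been moved to the error state,
 * a single dummy send WR (IPOIB_CM_RX_DRAIN_WRID) is posted on one of
 * them.  The send queue of these QPs shares priv->recv_cq, so the
 * resulting "flush error" completion is observed only after every receive
 * completion already queued for the flushed QPs, signalling that they can
 * be reaped safely.
 */
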
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}
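
/*
 * Receive WR IDs encode the ring index in the low bits and OR in
 * IPOIB_OP_CM | IPOIB_OP_RECV so that the completion dispatch in
 * ipoib_ib.c can route them to the connected-mode handlers;
 * ipoib_cm_handle_rx_wc() masks the flag bits back off to recover the
 * buffer index.
 */
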
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
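
/*
 * CM receive buffers are built as an skb with an IPOIB_CM_HEAD_SIZE linear
 * head plus up to IPOIB_CM_RX_SG - 1 full-page fragments: mapping[0] holds
 * the DMA address of the head and mapping[1..frags] the page fragments,
 * matching the scatter list set up in ipoib_cm_init_rx_wr() below.
 */
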
static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	kfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->mr->lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next    = NULL;
	wr->sg_list = priv->cm.rx_sge;
	wr->num_sge = priv->cm.num_frags;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring)
		return -ENOMEM;

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;

	return ib_send_cm_rep(cm_id, &rep);
}
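
/*
 * The REQ/REP private data (struct ipoib_cm_data) carries our datagram QPN
 * and the receive buffer size we can accept; the active side reads the mtu
 * field in ipoib_cm_rep_handler() below and caps the connection MTU
 * accordingly.
 */
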
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
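
/*
 * Note that skb_put_frags() hands any pages beyond the received length
 * over to the freshly allocated replacement skb (toskb); those pages are
 * still DMA-mapped, so only the pages actually consumed by this packet
 * have to be reallocated and remapped in ipoib_cm_handle_rx_wc().
 */
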
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr = addr;
	priv->tx_sge[0].length = len;

	priv->tx_wr.num_sge = 1;
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
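
/*
 * Connected-mode TX uses a single scatter/gather entry per send: the whole
 * skb is mapped linearly in ipoib_cm_send(), which is why NETIF_F_SG is
 * cleared when the interface is switched to connected mode (see set_mode()
 * below).
 */
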
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
		}
	}
}
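
/*
 * TX ring accounting: tx_head advances only when the post succeeds, the
 * ring index is tx_head masked by (ipoib_sendq_size - 1) so the ring
 * indexing assumes a power-of-two queue size, and the net queue is stopped
 * once tx_outstanding reaches ipoib_sendq_size and re-woken at half that
 * in the completion handler below.
 */
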
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);

err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}
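
/*
 * The CM service ID is IPOIB_CM_IETF_ID OR'd with the peer's datagram QPN
 * (taken from the IPoIB hardware address), which matches the ID each
 * interface listens on in ipoib_cm_dev_open(), so every IPoIB port gets
 * its own connected-mode service.
 */
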
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		spin_lock_irqsave(&priv->tx_lock, flags);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
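
/*
 * Oversized packets are not dropped silently: ipoib_cm_skb_too_long()
 * updates the route's path MTU and queues the skb so that the work above
 * can send an ICMP "fragmentation needed" / ICMPv6 "packet too big" error
 * from workqueue context instead of the transmit path.
 */
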
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");

		rtnl_lock();
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);

		rtnl_lock();
		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
			if (priv->hca_caps & IB_DEVICE_UD_TSO)
				dev->features |= NETIF_F_TSO;
		}
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);

		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
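
/*
 * The "mode" attribute created above appears under the netdevice's sysfs
 * directory (typically /sys/class/net/<ifname>/mode) and accepts the
 * strings "connected" and "datagram", handled by set_mode() above.
 */
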
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
	}
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}
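
/*
 * If the HCA does not support SRQs (ib_create_srq() returns -ENOSYS),
 * ipoib_cm_create_srq() leaves priv->cm.srq NULL and each passive
 * connection gets its own receive ring instead, with the number of such
 * QPs bounded by the max_nonsrq_conn_qp module parameter.
 */
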
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}