/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
                 "Max number of connected-mode QPs per interface "
                 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .wr_id  = IPOIB_CM_RX_DRAIN_WRID,
        .opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (i = 0; i < frags; ++i)
                ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
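
/*
 * Repost a receive buffer on the shared receive queue.  The wr_id encodes
 * the ring slot plus the IPOIB_OP_CM | IPOIB_OP_RECV flags so that the
 * completion handler can tell CM receives apart from datagram-mode ones.
 * A buffer whose post fails is unmapped and freed here, since it will
 * never generate a completion.
 */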
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < priv->cm.num_frags; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
                                      priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
                                        struct ipoib_cm_rx *rx,
                                        struct ib_recv_wr *wr,
                                        struct ib_sge *sge, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                sge[i].addr = rx->rx_ring[id].mapping[i];

        ret = ib_post_recv(rx->qp, wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                      rx->rx_ring[id].mapping);
                dev_kfree_skb_any(rx->rx_ring[id].skb);
                rx->rx_ring[id].skb = NULL;
        }

        return ret;
}
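
/*
 * Allocate a receive skb with a linear head of IPOIB_CM_HEAD_SIZE bytes
 * plus 'frags' full pages as fragments, and DMA-map every piece.  On a
 * partial failure everything mapped so far is unwound, so the caller only
 * ever sees an all-or-nothing result.
 */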
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
                                             struct ipoib_cm_rx_buf *rx_ring,
                                             int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG])
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
                return NULL;

        /*
         * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 12);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(GFP_ATOMIC);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        rx_ring[id].skb = skb;
        return skb;

partial_error:

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (; i > 0; --i)
                ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
                                  struct ipoib_cm_rx_buf *rx_ring)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (rx_ring[i].skb) {
                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                              rx_ring[i].mapping);
                        dev_kfree_skb_any(rx_ring[i].skb);
                }

        vfree(rx_ring);
}
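
/*
 * Begin draining receive QPs that have reported
 * IB_EVENT_QP_LAST_WQE_REACHED.  A send WR posted on a QP in the error
 * state completes immediately with a flush error; that completion marks
 * the point at which all receives previously posted on the flushed QPs
 * have been reaped, so only one drain WR needs to be outstanding.
 */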
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on flush list are in error state. This way, a "flush
         * error" WC will be immediately generated for each WR we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        unsigned long flags;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        if (!ipoib_cm_has_srq(dev)) {
                attr.cap.max_recv_wr  = ipoib_recvq_size;
                attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
        }

        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first. This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * fails.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return 0;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return 0;
        }

        return 0;
}
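
/*
 * Initialize the reusable receive WR and its scatter list: one SGE of
 * IPOIB_CM_HEAD_SIZE for the linear skb head, then one PAGE_SIZE SGE per
 * fragment, all under the device MR's lkey.  Callers only patch in the
 * per-buffer DMA addresses before posting.
 */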
static void ipoib_cm_init_rx_wr(struct net_device *dev,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < priv->cm.num_frags; ++i)
                sge[i].lkey = priv->mr->lkey;

        sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < priv->cm.num_frags; ++i)
                sge[i].length = PAGE_SIZE;

        wr->next    = NULL;
        wr->sg_list = sge;
        wr->num_sge = priv->cm.num_frags;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
                                   struct ipoib_cm_rx *rx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct {
                struct ib_recv_wr wr;
                struct ib_sge sge[IPOIB_CM_RX_SG];
        } *t;
        int ret;
        int i;

        rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
        if (!rx->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                return -ENOMEM;
        }

        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_free;
        }

        ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

        spin_lock_irq(&priv->lock);

        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
                ret = -EINVAL;
                goto err_free;
        } else
                ++priv->cm.nonsrq_conn_qp;

        spin_unlock_irq(&priv->lock);

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
                                           rx->rx_ring[i].mapping)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ret = -ENOMEM;
                        goto err_count;
                }
                ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
                if (ret) {
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
                                   "failed for buf %d\n", i);
                        ret = -EIO;
                        goto err_count;
                }
        }

        rx->recv_count = ipoib_recvq_size;

        kfree(t);

        return 0;

err_count:
        spin_lock_irq(&priv->lock);
        --priv->cm.nonsrq_conn_qp;
        spin_unlock_irq(&priv->lock);

err_free:
        kfree(t);
        ipoib_cm_free_rx_ring(dev, rx->rx_ring);

        return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.srq = ipoib_cm_has_srq(dev);
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;

        return ib_send_cm_rep(cm_id, &rep);
}
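
/*
 * Passive side of connection setup: a peer's REQ creates an RC receive QP,
 * moves it through INIT/RTR/RTS with a random starting PSN, sets up a
 * private receive ring when no SRQ is available, and replies with a REP
 * carrying our datagram QPN and CM buffer size as private data.
 */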
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
        cm_id->context = p;
        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = random32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        if (!ipoib_cm_has_srq(dev)) {
                ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
                if (ret)
                        goto err_modify;
        }

        spin_lock_irq(&priv->lock);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
        }
        return 0;

err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = netdev_priv(p->dev);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
                /* Fall through */
        default:
                return 0;
        }
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                          unsigned int length, struct sk_buff *toskb)
{
        int i, num_frags;
        unsigned int size;

        /* put header into skb */
        size = min(length, hdr_space);
        skb->tail += size;
        skb->len += size;
        length -= size;

        num_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (length == 0) {
                        /* don't need this page */
                        skb_fill_page_desc(toskb, i, skb_frag_page(frag),
                                           0, PAGE_SIZE);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);

                        skb_frag_size_set(frag, size);
                        skb->data_len += size;
                        skb->truesize += size;
                        skb->len += size;
                        length -= size;
                }
        }
}
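
/*
 * Receive completion handler for connected mode.  Drain-WR completions are
 * recognized by their reserved wr_id and advance the QP reaping state
 * machine; small packets (under IPOIB_CM_COPYBREAK) are copied into a
 * fresh skb so the large ring buffer can be reposted untouched, while
 * larger ones swap in a newly allocated buffer and hand the old skb up
 * the stack.
 */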
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];
        int frags;
        int has_srq;
        struct sk_buff *small_skb;

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_recvq_size);
                return;
        }

        p = wc->qp->qp_context;

        has_srq = ipoib_cm_has_srq(dev);
        rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

        skb = rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);
                ++dev->stats.rx_dropped;
                if (has_srq)
                        goto repost;
                else {
                        if (!--p->recv_count) {
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
                                queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        }
                        return;
                }
        }

        if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do not re-add it
                         * if it has been moved out of list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        if (wc->byte_len < IPOIB_CM_COPYBREAK) {
                int dlen = wc->byte_len;

                small_skb = dev_alloc_skb(dlen + 12);
                if (small_skb) {
                        skb_reserve(small_skb, 12);
                        ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
                                                   dlen, DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, small_skb->data, dlen);
                        ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
                                                      dlen, DMA_FROM_DEVICE);
                        skb_put(small_skb, dlen);
                        skb = small_skb;
                        goto copied;
                }
        }

        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

        newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
        memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;
        netif_receive_skb(skb);

repost:
        if (has_srq) {
                if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
                        ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
                                   "for buf %d\n", wr_id);
        } else {
                if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
                                                          &priv->cm.rx_wr,
                                                          priv->cm.rx_sge,
                                                          wr_id))) {
                        --p->recv_count;
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
                                   "for buf %d\n", wr_id);
                }
        }
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            u64 addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge[0].addr   = addr;
        priv->tx_sge[0].length = len;

        priv->tx_wr.num_sge = 1;
        priv->tx_wr.wr_id   = wr_id | IPOIB_OP_CM;

        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
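
/*
 * Transmit one skb on a connected-mode QP.  The packet is recorded in
 * tx_ring before ib_post_send() so a fast completion cannot race with the
 * bookkeeping, and the net queue is stopped once tx_outstanding reaches
 * the send queue size.
 */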
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx_buf *tx_req;
        u64 addr;
        int rc;

        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
                ++dev->stats.tx_dropped;
                ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }

        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        tx_req->mapping = addr;

        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                       addr, skb->len);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
                ++tx->tx_head;

                skb_orphan(skb);
                skb_dst_drop(skb);

                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
                        if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                                ipoib_warn(priv, "request notify on send CQ failed\n");
                        netif_stop_queue(dev);
                }
        }
}
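
/*
 * Send completion handler: unmap and free the skb, advance tx_tail, wake
 * the net queue once the ring is half empty, and on a hard (non-flush)
 * error tear down the neighbour association and queue the connection for
 * reaping so it can be re-established on a later transmit.
 */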
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
        struct ipoib_cm_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        netif_tx_lock(dev);

        ++tx->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;

                ipoib_dbg(priv, "failed cm send event "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);

                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

                spin_unlock_irqrestore(&priv->lock, flags);
        }

        netif_tx_unlock(dev);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0, NULL);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                       IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *rx, *n;
        LIST_HEAD(list);

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(rx, n, &list, list) {
                ib_destroy_cm_id(rx->id);
                ib_destroy_qp(rx->qp);
                if (!ipoib_cm_has_srq(dev)) {
                        ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
                        spin_lock_irq(&priv->lock);
                        --priv->cm.nonsrq_conn_qp;
                        spin_unlock_irq(&priv->lock);
                }
                kfree(rx);
        }
}
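
/*
 * Tear down the passive side: stop listening, push every live receive QP
 * into the error state, and then wait (bounded at five seconds) for the
 * drain protocol above to reap them all.  If the hardware never delivers
 * the flush completions, assume it is wedged and free everything anyway.
 */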
void ipoib_cm_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long begin;
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);
        priv->cm.id = NULL;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        /* Wait for all RX to be drained */
        begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                msleep(1);
                ipoib_drain_cq(dev);
                spin_lock_irq(&priv->lock);
        }

        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_reap_list(dev);

        cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_cm_data *data = event->private_data;
        struct sk_buff_head skqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;

        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        skb_queue_head_init(&skqueue);

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .send_cq         = priv->recv_cq,
                .recv_cq         = priv->recv_cq,
                .srq             = priv->cm.srq,
                .cap.max_send_wr = ipoib_sendq_size,
                .cap.max_send_sge = 1,
                .sq_sig_type     = IB_SIGNAL_ALL_WR,
                .qp_type         = IB_QPT_RC,
                .qp_context      = tx
        };

        return ib_create_qp(priv->pd, &attr);
}
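
/*
 * Build and fire the active-side REQ.  The service ID combines the
 * IPOIB_CM_IETF_ID prefix with the peer's datagram QPN, and the retry
 * counts are kept at zero, as the in-line comments below note the IPoIB
 * connected-mode draft advises.
 */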
static int ipoib_cm_send_req(struct net_device *dev,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        req.primary_path     = pathrec;
        req.alternate_path   = NULL;
        req.service_id       = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num           = qp->qp_num;
        req.qp_type          = qp->qp_type;
        req.private_data     = &data;
        req.private_data_len = sizeof data;
        req.flow_control     = 0;

        req.starting_psn     = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources        = 4;
        req.remote_cm_response_timeout = 20;
        req.local_cm_response_timeout  = 20;
        req.retry_count                = 0; /* RFC draft warns against retries */
        req.rnr_retry_count            = 0; /* RFC draft warns against retries */
        req.max_cm_retries             = 15;
        req.srq                        = ipoib_cm_has_srq(dev);

        return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
                                   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        if (ret) {
                ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
                return ret;
        }
        return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        int ret;

        p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }

        p->qp = ipoib_cm_create_tx_qp(p->dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
                goto err_modify;
        }

        ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_send_cm;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
                  p->qp->qp_num, pathrec->dgid.raw, qpn);

        return 0;

err_send_cm:
err_modify:
        ib_destroy_cm_id(p->id);
err_id:
        p->id = NULL;
        ib_destroy_qp(p->qp);
err_qp:
        p->qp = NULL;
        vfree(p->tx_ring);
err_tx:
        return ret;
}
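
/*
 * Destroy an active-side connection.  In-flight sends get up to five
 * seconds to complete; anything still on the ring after that is reclaimed
 * by hand, waking the net queue as tx_outstanding drops.
 */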
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_cm_tx_buf *tx_req;
        unsigned long begin;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->tx_ring) {
                /* Wait for all sends to complete */
                begin = jiffies;
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        if (time_after(jiffies, begin + 5 * HZ)) {
                                ipoib_warn(priv, "timing out; %d sends not completed\n",
                                           p->tx_head - p->tx_tail);
                                goto timeout;
                        }

                        msleep(1);
                }
        }

timeout:

        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
                                    DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
                netif_tx_lock_bh(p->dev);
                if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
                netif_tx_unlock_bh(p->dev);
        }

        if (p->qp)
                ib_destroy_qp(p->qp);

        vfree(p->tx_ring);
        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        unsigned long flags;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
                                       struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        neigh->cm = tx;
        tx->neigh = neigh;
        tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
        unsigned long flags;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
                spin_unlock_irqrestore(&priv->lock, flags);
        }
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
        int ret;

        struct ib_sa_path_rec pathrec;
        u32 qpn;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
                qpn = IPOIB_QPN(neigh->daddr);
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                ret = ipoib_cm_tx_init(p, qpn, &pathrec);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);

                if (ret) {
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
                                list_del(&neigh->list);
                                ipoib_neigh_free(neigh);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct net_device *dev = priv->dev;
        struct ipoib_cm_tx *p;
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                ipoib_cm_tx_destroy(p);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.skb_task);
        struct net_device *dev = priv->dev;
        struct sk_buff *skb;
        unsigned long flags;
        unsigned mtu = priv->mcast_mtu;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
                dev_kfree_skb_any(skb);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
                           unsigned int mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
                queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
        ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
                                                cm.rx_reap_task)->dev);
}
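
/*
 * Periodic garbage collection of idle passive connections.  passive_ids is
 * kept in LRU order, so the scan starts at the tail and stops at the first
 * entry touched within IPOIB_CM_RX_TIMEOUT; anything older is moved to the
 * error list and its QP pushed into the error state to trigger the drain
 * path.
 */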
static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        int ret;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                return sprintf(buf, "connected\n");
        else
                return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct net_device *dev = to_net_dev(d);
        int ret;

        if (!rtnl_trylock())
                return restart_syscall();

        ret = ipoib_set_mode(dev, buf);

        rtnl_unlock();

        if (!ret)
                return count;

        return ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
                .srq_type = IB_SRQT_BASIC,
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = max_sge
                }
        };

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                if (PTR_ERR(priv->cm.srq) != -ENOSYS)
                        printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
                               priv->ca->name, PTR_ERR(priv->cm.srq));
                priv->cm.srq = NULL;
                return;
        }

        priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ib_destroy_srq(priv->cm.srq);
                priv->cm.srq = NULL;
                return;
        }
}
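
/*
 * Per-device connected-mode setup: initialize the reaping and start-up
 * work items, size the receive scatter list to what the HCA's max_srq_sge
 * allows (capped at IPOIB_CM_RX_SG), create the SRQ if the hardware
 * supports one, and prime it with receive buffers.
 */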
int ipoib_cm_dev_init(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, ret;
        struct ib_device_attr attr;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_LIST_HEAD(&priv->cm.rx_error_list);
        INIT_LIST_HEAD(&priv->cm.rx_flush_list);
        INIT_LIST_HEAD(&priv->cm.rx_drain_list);
        INIT_LIST_HEAD(&priv->cm.rx_reap_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
        INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        skb_queue_head_init(&priv->cm.skb_queue);

        ret = ib_query_device(priv->ca, &attr);
        if (ret) {
                printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
                return ret;
        }

        ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

        attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
        ipoib_cm_create_srq(dev, attr.max_srq_sge);
        if (ipoib_cm_has_srq(dev)) {
                priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
                priv->cm.num_frags  = attr.max_srq_sge;
                ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
                          priv->cm.max_cm_mtu, priv->cm.num_frags);
        } else {
                priv->cm.max_cm_mtu = IPOIB_CM_MTU;
                priv->cm.num_frags  = IPOIB_CM_RX_SG;
        }

        ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

        if (ipoib_cm_has_srq(dev)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
                        if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
                                                   priv->cm.num_frags - 1,
                                                   priv->cm.srq_ring[i].mapping)) {
                                ipoib_warn(priv, "failed to allocate "
                                           "receive buffer %d\n", i);
                                ipoib_cm_dev_cleanup(dev);
                                return -ENOMEM;
                        }

                        if (ipoib_cm_post_receive_srq(dev, i)) {
                                ipoib_warn(priv, "ipoib_cm_post_receive_srq "
                                           "failed for buf %d\n", i);
                                ipoib_cm_dev_cleanup(dev);
                                return -EIO;
                        }
                }
        }

        priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
        return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
}