/*
 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	u8 ack_delay;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
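
/*
 * Allocate an address handle and MAD send buffer for a CM message addressed
 * to the peer's CM QP.  A reference on the cm_id is taken and stashed in
 * context[0]; cm_free_msg() drops it when the buffer is released.
 */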
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;
	return 0;
}
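
/*
 * Local communication IDs come from the idr and are XORed with
 * cm.random_id_operand, so the IDs that go out on the wire are hard for a
 * remote node to guess.  cm_free_id() and cm_get_id() apply the same XOR to
 * map a wire ID back to its idr slot.
 */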
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id, &id);
		if (!ret)
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
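
/*
 * Masked copy/compare helpers used for listen matching: only the bytes
 * selected by the listener's compare mask take part in the private data
 * comparison.
 */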
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
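
/*
 * The listen rb-tree is keyed by device, service ID and compare data.  If an
 * equivalent listen already exists (overlapping service ID/mask on the same
 * device with matching compare data), that entry is returned instead of
 * inserting a duplicate.
 */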
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
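
/*
 * Connections in timewait are tracked in two rb-trees keyed by the remote CA
 * GUID plus either the remote communication ID or the remote QPN.  The
 * insert helpers return an existing entry on collision, which the REQ
 * handling path uses to detect duplicate REQs and stale connections.
 */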
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
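
/*
 * SIDR request IDs are kept in their own rb-tree, keyed by the remote
 * communication ID and the requester's port GID.
 */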
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
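
/*
 * IB encodes times as 4.096us * 2^iba_time.  For example, iba_time = 14 is
 * about 67ms; the approximation below gives 1 << (14 - 8) = 64ms.
 */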
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
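
/*
 * cm_create_timewait_info() allocates the timewait tracking structure and
 * pre-arms its delayed work to deliver an IB_CM_TIMEWAIT_EXIT event;
 * cm_enter_timewait() schedules that work once the connection is torn down.
 */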
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irq(&cm_id_priv->lock);
		spin_lock_irq(&cm.lock);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
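
/*
 * ib_cm_listen - Start listening for connection requests on the given
 * service ID.  A service_mask of 0 means an exact match on service_id, and
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused service ID.
 * Returns -EBUSY if an equivalent listen already exists.
 */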
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
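
/*
 * CM transaction IDs pack the MAD agent's hi_tid into the upper 32 bits and
 * the local communication ID plus a message sequence number into the lower
 * 32 bits.
 */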
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method        = IB_MGMT_METHOD_SEND;
	hdr->attr_id       = attr_id;
	hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       param->primary_path->packet_life_time));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       param->alternate_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
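
/*
 * ib_send_cm_req - Send a connection REQ for the given QP using the primary
 * (and optional alternate) path.  The cm_id must be in IB_CM_IDLE; on
 * success it transitions to IB_CM_REQ_SENT and the REQ is retried up to
 * param->max_cm_retries times within the computed CM timeout.
 */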
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
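
/*
 * Build path records from a received REQ, swapping source and destination so
 * the paths are expressed from the passive (receiving) side's point of view.
 */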
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
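
/*
 * Deliver the current event to the consumer's cm_handler, then drain any
 * events that were queued on the cm_id while it was busy.  A non-zero return
 * from the handler destroys the cm_id.
 */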
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
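
/*
 * A duplicate REQ gets either a resent MRA (if we have already MRA'd the
 * original) or a stale-connection REJ (if the connection is in timewait);
 * in any other state the duplicate is silently dropped.
 */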
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
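
/*
 * Match an incoming REQ against duplicate and stale connections, then against
 * the listening cm_ids.  Returns the listener (with a reference held), or
 * NULL if the REQ was a duplicate, stale, or unmatched.
 */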
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}
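
/*
 * Handle a new incoming REQ: create a cm_id for the passive side, match it
 * against a listener, resolve the primary and alternate paths, and hand an
 * IB_CM_REQ_RECEIVED event to the listener's callback.
 */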
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
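
/*
 * Send a REP in response to a received REQ.  Only valid while the cm_id is
 * in the REQ_RCVD or MRA_REQ_SENT state.
 */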
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
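
/*
 * A duplicate REP arrived for a connection that already progressed past
 * REP_RCVD: re-send the RTU or MRA that the peer apparently missed.
 */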
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
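
/*
 * Handle a received REP on the active side: check for duplicates and stale
 * connections, record the remote QPN and negotiated parameters, and report
 * an IB_CM_REP_RECEIVED event to the consumer.
 */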
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
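
/*
 * Handle a received DREQ.  Unknown connections get a directly issued DREP;
 * otherwise the cm_id moves to DREQ_RCVD and an IB_CM_DREQ_RECEIVED event is
 * reported (or, in timewait, the DREP is simply re-sent).
 */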
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
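
/*
 * Handle a received REJ: cancel any outstanding MAD, move the connection to
 * idle or timewait depending on its state and the reject reason, and report
 * an IB_CM_REJ_RECEIVED event.
 */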
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		cm_state = cm_id->state;
		lap_state = IB_CM_MRA_LAP_SENT;
		msg_response = CM_MSG_RESPONSE_OTHER;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
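
/*
 * Handle a received MRA: extend the timeout on the outstanding REQ, REP or
 * LAP it acknowledges and report an IB_CM_MRA_RECEIVED event.
 */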
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
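
/*
 * Handle a received LAP (alternate path request) on an established
 * connection: build the proposed alternate path, re-send the MRA if one is
 * pending, and report an IB_CM_LAP_RECEIVED event.
 */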
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
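
/*
 * The timewait period for a connection has expired: move the cm_id back to
 * IDLE and report an IB_CM_TIMEWAIT_EXIT event.
 */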
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
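
/*
 * Handle a received SIDR REQ: create a cm_id for the request, drop
 * duplicates, match the service against the listeners, and report an
 * IB_CM_SIDR_REQ_RECEIVED event (or reject if the service is unsupported).
 */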
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
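
/*
 * A MAD send completed in error.  If it is still the cm_id's outstanding
 * message for its current state, roll the state back (idle or timewait) and
 * report the matching *_ERROR event to the consumer.
 */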
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}
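
/*
 * MAD agent send-completion callback.  Successful and flushed sends are
 * simply freed; other errors are routed to cm_process_send_error() when
 * the message still carries its cm_id/state context.
 */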
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
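
/*
 * Workqueue handler that dispatches a queued CM event to the handler for
 * its message type.  A handler that succeeds takes ownership of the work
 * item (via cm_process_work()); on failure it is freed here.
 */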
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
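
/*
 * Handle an IB_EVENT_COMM_EST notification from the consumer: transition
 * REP_SENT / MRA_REP_RCVD connections to ESTABLISHED and queue an
 * IB_CM_USER_ESTABLISHED work item so the event is delivered from the CM
 * workqueue rather than the caller's context.
 */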
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}
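
/*
 * Handle an IB_EVENT_PATH_MIG notification: on an established connection
 * with a loaded alternate path, make that alternate path the new primary
 * path.
 */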
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
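
/*
 * ib_cm_notify() lets a consumer tell the CM about QP events it cannot
 * observe directly: IB_EVENT_COMM_EST when data arrives on a passive-side
 * QP before the RTU, and IB_EVENT_PATH_MIG when the QP has migrated to
 * its alternate path.
 *
 * Typical use from a QP event handler (a sketch; assumes the consumer
 * stored its ib_cm_id in the QP's qp_context):
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */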
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
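
/*
 * MAD agent receive callback.  Map the attribute ID of the incoming MAD
 * to a CM event, allocate a work item (with room for the path records a
 * REQ or LAP may carry) and queue it for cm_work_handler().  MADs with
 * unknown attributes are dropped.
 */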
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_delayed_work(cm.wq, &work->work, 0);
}
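
/*
 * Fill in the attributes needed to move the connection's QP to INIT:
 * access flags, P_Key index and port number, taken from the primary
 * address vector.
 */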
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
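
/*
 * Fill in the attributes for the RTR transition: primary (and, if
 * loaded, alternate) address handle, path MTU, destination QPN and RQ
 * PSN, plus responder resources and the RNR timer for RC QPs.
 */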
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
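
/*
 * Fill in the attributes for the RTS transition.  Before a LAP has been
 * processed this sets the SQ PSN and, for RC QPs, the timeout and retry
 * counts, arming path migration if an alternate path exists; once a LAP
 * has loaded an alternate path it only reloads that path and rearms
 * migration.
 */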
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout = cm_id_priv->av.timeout;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
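
/*
 * ib_cm_init_qp_attr() returns the QP attributes (and attribute mask)
 * that a consumer should pass to ib_modify_qp() to move its QP through
 * INIT, RTR and RTS as the connection is established.  Which attributes
 * are returned depends on qp_attr->qp_state and on the current CM state.
 *
 * Typical consumer sequence (a sketch; error handling omitted):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *	... repeat with IB_QPS_RTR, then IB_QPS_RTS ...
 */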
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
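
/*
 * Cache the local CA's ACK delay for use in the timeouts the CM
 * advertises; if the device query fails, leave it at zero so only the
 * packet life time is used.
 */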
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
	struct ib_device_attr attr;

	if (ib_query_device(cm_dev->device, &attr))
		cm_dev->ack_delay = 0; /* acks will rely on packet life time */
	else
		cm_dev->ack_delay = attr.local_ca_ack_delay;
}
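
/*
 * Device-add callback for the ib_cm client: allocate a cm_device with
 * per-port state, register a GSI MAD agent for the CM management class
 * on every physical port, advertise CM support in each port's capability
 * mask and link the device into the global device list.  On failure,
 * everything done so far is unwound.
 */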
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_get_ack_delay(cm_dev);

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
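
/*
 * Device-removal callback: unlink the cm_device, clear the CM capability
 * bit and unregister the MAD agent on each port, then free the
 * per-device state.
 */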
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
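
/*
 * Module initialization: reset the global cm state (lookup tables, locks,
 * local ID allocator, timewait list), create the "ib_cm" workqueue and
 * register as an IB client so cm_add_one() runs for existing devices.
 */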
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
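
/*
 * Module cleanup: cancel any pending timewait work, tear down the
 * workqueue, free leftover timewait entries, then unregister the client
 * and destroy the local ID table.
 */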
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);
	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);