cm.c

/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;
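
/*
 * Global CM state: listen_service_table maps (device, service id,
 * optional private-data compare) to listening cm_ids; the remote_id,
 * remote_qp and remote_sidr trees track connections by their remote
 * identifiers so duplicate and stale messages can be matched; and
 * local_id_table hands out local communication IDs.
 */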
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};
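
/*
 * Lifetime model: each cm_id_private is reference counted.  Dropping
 * the last reference signals the 'comp' completion, on which
 * ib_destroy_cm_id() blocks before freeing the structure, so callbacks
 * and sends that still hold a reference can finish safely.
 */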
static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
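
/*
 * Local communication IDs come from an idr.  idr_get_new_above() can
 * fail with -EAGAIN when the idr needs more memory, so the loop below
 * replenishes it with idr_pre_get() and retries until the allocation
 * either succeeds or fails for a different reason.
 */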
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}
	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
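
/*
 * Listens live in an rb-tree ordered by (device, service id,
 * private-data compare value).  Insertion returns the existing entry
 * when an equivalent listen is already present (after applying the
 * service and compare-data masks), letting the caller detect a
 * conflicting listen and fail with -EBUSY.
 */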
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
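
/*
 * A minimal consumer-side sketch (not part of this file): a ULP would
 * typically pair ib_create_cm_id() with either ib_cm_listen() on the
 * passive side or ib_send_cm_req() on the active side, and release the
 * id with ib_destroy_cm_id() from its teardown path:
 *
 *	struct ib_cm_id *id = ib_create_cm_id(device, my_handler, ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, my_service_id, 0, NULL);
 *
 * Here my_handler, ctx and my_service_id are hypothetical names.
 */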
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
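	/*
	 * 4.096 us is roughly 2^-8 ms, so 4.096us x 2^iba_time is about
	 * 2^(iba_time - 8) ms; e.g. an IBA timeout value of 14 maps to
	 * 1 << 6 = 64 ms.
	 */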
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
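
/*
 * A service id of IB_CM_ASSIGN_SERVICE_ID asks the CM to pick the next
 * id from the locally-assigned range; otherwise the caller's
 * service_id/service_mask pair defines the (possibly wildcarded) range
 * of ids this cm_id listens on.  A zero mask is treated as an exact
 * match on service_id.
 */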
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
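
/*
 * TID layout: the upper 32 bits carry the MAD agent's hi_tid, and the
 * lower 32 bits combine the local communication id with the message
 * sequence in bits 30-31, so responses can be demultiplexed back to
 * the owning cm_id and exchange.
 */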
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
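
/*
 * The REQ timeout below allows one round trip on the wire plus the
 * remote CM's stated response time: twice the path's packet life time
 * plus remote_cm_response_timeout, all converted to milliseconds.
 */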
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		/* Propagate the allocation error instead of returning 0. */
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
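
/*
 * The REQ names the sender's (local) addressing information, so when
 * building path records on the passive side the local/remote fields
 * are deliberately swapped: the peer's local GID/LID become our
 * destination, and vice versa.
 */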
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
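
/*
 * When rejecting a REQ that has only been received, the remote side
 * does not yet know our local communication id, so local_comm_id is
 * left at zero; in every later state our id has already been carried
 * in an MRA or REP and can be filled in.
 */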
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
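
/*
 * A new REQ is first checked against the timewait trees: a match on
 * (remote comm id, remote CA GUID) or (remote QPN, remote CA GUID)
 * indicates either a retransmitted REQ for a live connection, which is
 * answered with a duplicate response, or a stale connection, which is
 * rejected.  Only then is the REQ matched against the listen table.
 */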
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id,
                                           req_msg->private_data);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}
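
/*
 * Process a new connection request: create a cm_id for the passive
 * side, record the connection parameters carried in the REQ, resolve
 * the primary (and optional alternate) path, and hand the request to
 * the matching listener's callback.
 */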
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto error1;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                goto error2;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret)
                goto error3;
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret)
                        goto error3;
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

error3: atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
error2: kfree(cm_id_priv->timewait_info);
        cm_id_priv->timewait_info = NULL;
error1: ib_destroy_cm_id(&cm_id_priv->id);
        return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
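
/*
 * Send a REP in response to a received REQ.  Valid only in the
 * REQ_RCVD and MRA_REQ_SENT states; on success the cm_id moves to
 * REP_SENT and the MAD is retained for retransmission until the RTU
 * (or a timeout) arrives.
 */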
int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}
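
/*
 * A duplicate REP was received after we already responded to the
 * original: resend the RTU if the connection is established, or the
 * MRA if we had MRA'd the REP; otherwise drop the duplicate.
 */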
static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}
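
/*
 * Process a received REP on the active side.  The reply is matched by
 * our local comm id alone; duplicate REPs and stale connections are
 * caught by inserting the timewait info into the remote id and remote
 * qpn tables before the connection state is updated to REP_RCVD.
 */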
static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_format_rep_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                rb_erase(&cm_id_priv->timewait_info->remote_id_node,
                         &cm.remote_id_table);
                cm_id_priv->timewait_info->inserted_remote_id = 0;
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock(&cm.lock);

        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:
        cm_deref_id(cm_id_priv);
        return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        /* See comment in ib_cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        unsigned long flags;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
        dreq_msg->local_comm_id = cm_id_priv->id.local_id;
        dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

        if (private_data && private_data_len)
                memcpy(dreq_msg->private_data, private_data, private_data_len);
}
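
/*
 * Initiate disconnection by sending a DREQ.  Only valid on an
 * established connection; if the DREQ cannot be allocated or posted,
 * the connection is moved directly into timewait.
 */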
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                goto out;
        }

        cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_DREQ_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
        drep_msg->local_comm_id = cm_id_priv->id.local_id;
        drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                kfree(data);
                return -EINVAL;
        }

        cm_set_private_data(cm_id_priv, data, private_data_len);
        cm_enter_timewait(cm_id_priv);

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
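
/*
 * Process a received DREQ.  A send still in progress (REP or DREQ) is
 * canceled, and a DREQ that arrives while we are already in timewait
 * is answered with a DREP built from the saved private data.
 */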
static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_drep_msg *drep_msg;
        unsigned long flags;
        int ret;

        drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
                                   drep_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &drep_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
            cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_enter_timewait(cm_id_priv);

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
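
/*
 * Send a REJ.  The connection state determines the cleanup performed:
 * rejecting before our REP was sent resets the cm_id to idle, while
 * rejecting a connection we have already replied to moves it into
 * timewait.
 */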
int ib_send_cm_rej(struct ib_cm_id *cm_id,
                   enum ib_cm_rej_reason reason,
                   void *ari,
                   u8 ari_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);
                cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        if (ret)
                goto out;

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
        struct cm_rej_msg *rej_msg;
        struct ib_cm_rej_event_param *param;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rej_rcvd;
        param->ari = rej_msg->ari;
        param->ari_length = cm_rej_get_reject_info_len(rej_msg);
        param->reason = __be16_to_cpu(rej_msg->reason);
        work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        __be32 remote_id;

        remote_id = rej_msg->local_comm_id;

        if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
                spin_lock_irqsave(&cm.lock, flags);
                timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
                                                  remote_id);
                if (!timewait_info) {
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return NULL;
                }
                cm_id_priv = idr_find(&cm.local_id_table,
                                      (__force int) timewait_info->work.local_id);
                if (cm_id_priv) {
                        if (cm_id_priv->id.remote_id == remote_id)
                                atomic_inc(&cm_id_priv->refcount);
                        else
                                cm_id_priv = NULL;
                }
                spin_unlock_irqrestore(&cm.lock, flags);
        } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
        else
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

        return cm_id_priv;
}
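
/*
 * Process a received REJ.  Any outstanding send is canceled, and the
 * connection either enters timewait (stale connection, or already past
 * our REP) or resets to idle, before the event is reported.
 */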
static int cm_rej_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rej_msg *rej_msg;
        unsigned long flags;
        int ret;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_rejected_id(rej_msg);
        if (!cm_id_priv)
                return -EINVAL;

        cm_format_rej_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
                if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
                        cm_enter_timewait(cm_id_priv);
                else
                        cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_ESTABLISHED:
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
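
/*
 * Send an MRA to extend the remote CM's timeout for the message being
 * processed.  The message type (REQ, REP, or LAP/other) is derived
 * from the current connection state, which advances to the
 * corresponding MRA-sent state on success.
 */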
int ib_send_cm_mra(struct ib_cm_id *cm_id,
                   u8 service_timeout,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        void *data;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REQ_SENT;
                break;
        case IB_CM_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REP_SENT;
                break;
        case IB_CM_ESTABLISHED:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->lap_state = IB_CM_MRA_LAP_SENT;
                break;
        default:
                ret = -EINVAL;
                goto error1;
        }
        cm_id_priv->service_timeout = service_timeout;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;

error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        cm_free_msg(msg);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
        switch (cm_mra_get_msg_mraed(mra_msg)) {
        case CM_MSG_RESPONSE_REQ:
                return cm_acquire_id(mra_msg->remote_comm_id, 0);
        case CM_MSG_RESPONSE_REP:
        case CM_MSG_RESPONSE_OTHER:
                return cm_acquire_id(mra_msg->remote_comm_id,
                                     mra_msg->local_comm_id);
        default:
                return NULL;
        }
}
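
/*
 * Process a received MRA: verify that it acknowledges the message we
 * actually have outstanding, and extend that MAD's timeout by the
 * service timeout plus the packet lifetime before updating the state.
 */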
static int cm_mra_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_mra_msg *mra_msg;
        unsigned long flags;
        int timeout, ret;

        mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_mraed_id(mra_msg);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &mra_msg->private_data;
        work->cm_event.param.mra_rcvd.service_timeout =
                                        cm_mra_get_service_timeout(mra_msg);
        timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
                  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
                break;
        case IB_CM_REP_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
                break;
        case IB_CM_ESTABLISHED:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
                    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
                break;
        default:
                goto out;
        }
        cm_id_priv->msg->context[1] = (void *) (unsigned long)
                                      cm_id_priv->id.state;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_sa_path_rec *alternate_path,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
        lap_msg->local_comm_id = cm_id_priv->id.local_id;
        lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
        /* todo: need remote CM response timeout */
        cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
        lap_msg->alt_local_lid = alternate_path->slid;
        lap_msg->alt_remote_lid = alternate_path->dlid;
        lap_msg->alt_local_gid = alternate_path->sgid;
        lap_msg->alt_remote_gid = alternate_path->dgid;
        cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
        cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
        lap_msg->alt_hop_limit = alternate_path->hop_limit;
        cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
        cm_lap_set_sl(lap_msg, alternate_path->sl);
        cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
        cm_lap_set_local_ack_timeout(lap_msg,
                min(31, alternate_path->packet_life_time + 1));

        if (private_data && private_data_len)
                memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
                   struct ib_sa_path_rec *alternate_path,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            cm_id->lap_state != IB_CM_LAP_IDLE) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
                      alternate_path, private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_SENT;
        cm_id_priv->msg = msg;

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
                                    struct cm_lap_msg *lap_msg)
{
        memset(path, 0, sizeof *path);
        path->dgid = lap_msg->alt_local_gid;
        path->sgid = lap_msg->alt_remote_gid;
        path->dlid = lap_msg->alt_local_lid;
        path->slid = lap_msg->alt_remote_lid;
        path->flow_label = cm_lap_get_flow_label(lap_msg);
        path->hop_limit = lap_msg->alt_hop_limit;
        path->traffic_class = cm_lap_get_traffic_class(lap_msg);
        path->reversible = 1;
        /* pkey is same as in REQ */
        path->sl = cm_lap_get_sl(lap_msg);
        path->mtu_selector = IB_SA_EQ;
        /* mtu is same as in REQ */
        path->rate_selector = IB_SA_EQ;
        path->rate = cm_lap_get_packet_rate(lap_msg);
        path->packet_life_time_selector = IB_SA_EQ;
        path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
        path->packet_life_time -= (path->packet_life_time > 0);
}
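
/*
 * Process a received LAP.  The proposed alternate path is decoded into
 * a path record for the event; if we had already MRA'd a previous copy
 * of this LAP, the MRA is resent instead of reprocessing the
 * duplicate.
 */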
static int cm_lap_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_lap_msg *lap_msg;
        struct ib_cm_lap_event_param *param;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* todo: verify LAP request and send reject APR if invalid. */
        lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
                                   lap_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        param = &work->cm_event.param.lap_rcvd;
        param->alternate_path = &work->path[0];
        cm_format_path_from_lap(param->alternate_path, lap_msg);
        work->cm_event.private_data = &lap_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
                goto unlock;

        switch (cm_id_priv->id.lap_state) {
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER,
                              cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }

        cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
        cm_id_priv->tid = lap_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_apr_status status,
                          void *info,
                          u8 info_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
        apr_msg->local_comm_id = cm_id_priv->id.local_id;
        apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
        apr_msg->ap_status = (u8) status;

        if (info && info_length) {
                apr_msg->info_length = info_length;
                memcpy(apr_msg->info, info, info_length);
        }

        if (private_data && private_data_len)
                memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
                   enum ib_cm_apr_status status,
                   void *info,
                   u8 info_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
            (info && info_length > IB_CM_APR_INFO_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_RCVD &&
             cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
                      info, info_length, private_data, private_data_len);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_IDLE;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_apr_msg *apr_msg;
        unsigned long flags;
        int ret;

        apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
                                   apr_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
        work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
        work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
        work->cm_event.private_data = &apr_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
            (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
             cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        cm_id_priv->msg = NULL;

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
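
/*
 * The timewait period for a connection has expired: release the
 * timewait bookkeeping and, if the cm_id is still in TIMEWAIT for the
 * same remote QPN, move it back to IDLE.
 */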
static int cm_timewait_handler(struct cm_work *work)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        timewait_info = (struct cm_timewait_info *)work;
        cm_cleanup_timewait(timewait_info);

        cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
                                   timewait_info->work.remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
            cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_req_param *param)
{
        cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
        sidr_req_msg->request_id = cm_id_priv->id.local_id;
        sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
        sidr_req_msg->service_id = param->service_id;

        if (param->private_data && param->private_data_len)
                memcpy(sidr_req_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (!param->path || (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
        if (ret)
                goto out;

        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
                           param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_IDLE)
                ret = ib_post_send_mad(msg, NULL);
        else
                ret = -EINVAL;

        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                goto out;
        }
        cm_id->state = IB_CM_SIDR_REQ_SENT;
        cm_id_priv->msg = msg;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
                                     struct ib_cm_id *listen_id)
{
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_cm_sidr_req_event_param *param;

        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_req_rcvd;
        param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
        param->listen_id = listen_id;
        param->port = work->port->port_num;
        work->cm_event.private_data = &sidr_req_msg->private_data;
}
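
/*
 * Process a received SIDR REQ: create a cm_id for the response, record
 * the requester for duplicate detection, and hand the request to the
 * service's listener.  Duplicate requests and unmatched service IDs
 * destroy the temporary cm_id.
 */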
static int cm_sidr_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_wc *wc;
        unsigned long flags;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        /* Record SGID/SLID and request ID for lookup. */
        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        wc = work->mad_recv_wc->wc;
        cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
        cm_id_priv->av.dgid.global.interface_id = 0;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->id.remote_id = sidr_req_msg->request_id;
        cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
        cm_id_priv->tid = sidr_req_msg->hdr.tid;
        atomic_inc(&cm_id_priv->work_count);

        spin_lock_irqsave(&cm.lock, flags);
        cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
        if (cur_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
        cur_cm_id_priv = cm_find_listen(cm_id->device,
                                        sidr_req_msg->service_id,
                                        sidr_req_msg->private_data);
        if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                /* todo: reply with no match */
                goto out; /* No match. */
        }
        atomic_inc(&cur_cm_id_priv->refcount);
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = cur_cm_id_priv->id.context;
        cm_id_priv->id.service_id = sidr_req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(cur_cm_id_priv);
        return 0;
out:
        ib_destroy_cm_id(&cm_id_priv->id);
        return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_rep_param *param)
{
        cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
                          cm_id_priv->tid);
        sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
        sidr_rep_msg->status = param->status;
        cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
        sidr_rep_msg->service_id = cm_id_priv->id.service_id;
        sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

        if (param->info && param->info_length)
                memcpy(sidr_rep_msg->info, param->info, param->info_length);

        if (param->private_data && param->private_data_len)
                memcpy(sidr_rep_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
            (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
                           param);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }
        cm_id->state = IB_CM_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        spin_lock_irqsave(&cm.lock, flags);
        rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct ib_cm_sidr_rep_event_param *param;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_rep_rcvd;
        param->status = sidr_rep_msg->status;
        param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
        param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
        param->info = &sidr_rep_msg->info;
        param->info_len = sidr_rep_msg->info_length;
        work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_format_sidr_rep_event(work);
        cm_process_work(cm_id_priv, work);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
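
/*
 * A sent MAD completed in error.  If it is still the current message
 * for the cm_id and the state has not moved on, unwind the state
 * transition the send had started and report the matching error event
 * to the consumer.
 */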
static void cm_process_send_error(struct ib_mad_send_buf *msg,
                                  enum ib_wc_status wc_status)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_event cm_event;
        enum ib_cm_state state;
        unsigned long flags;
        int ret;

        memset(&cm_event, 0, sizeof cm_event);
        cm_id_priv = msg->context[0];

        /* Discard old sends or ones without a response. */
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        state = (enum ib_cm_state) (unsigned long) msg->context[1];
        if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
                goto discard;

        switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REQ_ERROR;
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REP_ERROR;
                break;
        case IB_CM_DREQ_SENT:
                cm_enter_timewait(cm_id_priv);
                cm_event.event = IB_CM_DREQ_ERROR;
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id_priv->id.state = IB_CM_IDLE;
                cm_event.event = IB_CM_SIDR_REQ_ERROR;
                break;
        default:
                goto discard;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_event.param.send_status = wc_status;

        /* No other events can occur on the cm_id at this point. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
        cm_free_msg(msg);
        if (ret)
                ib_destroy_cm_id(&cm_id_priv->id);
        return;
discard:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

        switch (mad_send_wc->status) {
        case IB_WC_SUCCESS:
        case IB_WC_WR_FLUSH_ERR:
                cm_free_msg(msg);
                break;
        default:
                if (msg->context[0] && msg->context[1])
                        cm_process_send_error(msg, mad_send_wc->status);
                else
                        cm_free_msg(msg);
                break;
        }
}
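
/*
 * Workqueue callback: dispatch a queued CM event to the handler for
 * its message type.  If the handler reports an error, the work item
 * is freed here.
 */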
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
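
/*
 * ib_cm_establish - Note that the connection for a cm_id is established.
 * @cm_id: Connection identifier to transition to the established state.
 *
 * Intended for passive-side consumers that learn the connection is up
 * before the RTU arrives (for example, because data was received on the
 * QP).  The state change is made under the cm_id lock here; delivery of
 * the IB_CM_USER_ESTABLISHED event is deferred to the CM workqueue.
 */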
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
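
/*
 * Usage sketch (illustrative only, not a definitive pattern): "cm_id"
 * below is a hypothetical caller-owned identifier expected to be in the
 * IB_CM_REP_SENT or IB_CM_MRA_REP_RCVD state.
 *
 *	ret = ib_cm_establish(cm_id);
 *	if (ret == -EISCONN)
 *		already_established();
 *	else if (ret)
 *		handle_error(ret);
 *
 * where already_established() and handle_error() stand in for whatever
 * the consumer does in those cases.
 */

/*
 * MAD agent receive handler: map the incoming attribute ID to a CM event,
 * allocate a cm_work item -- with room for one path record for a LAP, or
 * one or two for a REQ depending on whether an alternate path is present
 * -- and queue it to the CM workqueue.  Unrecognized attributes are
 * dropped.
 */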
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
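
/*
 * Fill in the attributes for transitioning a connection's QP to INIT:
 * pkey index, port number, and access flags (remote read is enabled only
 * when responder resources were negotiated).  Valid from the time a REQ
 * is sent or received through the established state.
 */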
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
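
/*
 * Fill in the attributes for the RTR transition: primary (and, when
 * loaded, alternate) address vector, path MTU, destination QPN, and
 * starting receive PSN, plus RDMA responder limits and the minimum RNR
 * timer for RC QPs.
 */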
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
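
/*
 * Fill in the attributes for the RTS transition: starting send PSN, plus
 * timeout/retry counts and RDMA initiator depth for RC QPs.  If an
 * alternate path has been loaded, rearm path migration as well.
 */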
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
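
/*
 * ib_cm_init_qp_attr - Initialize QP attributes for a connection.
 * @cm_id: Connection identifier associated with the QP.
 * @qp_attr: On input, qp_state selects the target transition (INIT, RTR,
 *	or RTS); on output, the attributes for that transition.
 * @qp_attr_mask: Set to the mask the caller should pass to ib_modify_qp().
 */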
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
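
/*
 * Usage sketch (illustrative only): a consumer typically walks its QP
 * through INIT, RTR, and RTS by querying the CM for each transition and
 * applying the result with ib_modify_qp().  "cm_id" and "qp" below are
 * hypothetical caller-owned objects.
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask, ret;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 * followed by the same sequence with IB_QPS_RTR and then IB_QPS_RTS.
 */

/*
 * Device client "add" callback: register a CM MAD agent on the GSI QP of
 * each physical port and advertise CM support in the port's capability
 * mask, unwinding any already-configured ports if a later one fails.
 */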
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
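
/*
 * Device client "remove" callback: undo cm_add_one() by clearing the CM
 * capability bit and unregistering the MAD agent on every port.
 */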
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
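
/*
 * Module init: zero the global CM state, set up the lookup tables and
 * local ID allocator, create the workqueue on which all CM events are
 * processed, and register as an IB client so cm_add_one() runs for each
 * existing device.
 */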
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	/*
	 * Unregister the client first so the MAD agents are torn down and
	 * no new work can be queued, then destroy the now-idle workqueue.
	 */
	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);