cm.c

  1. /*
  2. * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
  3. * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  4. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
  5. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * OpenIB.org BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer.
  20. *
  21. * - Redistributions in binary form must reproduce the above
  22. * copyright notice, this list of conditions and the following
  23. * disclaimer in the documentation and/or other materials
  24. * provided with the distribution.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33. * SOFTWARE.
  34. *
  35. * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
  36. */
  37. #include <linux/dma-mapping.h>
  38. #include <linux/err.h>
  39. #include <linux/idr.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/pci.h>
  42. #include <linux/rbtree.h>
  43. #include <linux/spinlock.h>
  44. #include <linux/workqueue.h>
  45. #include <rdma/ib_cache.h>
  46. #include <rdma/ib_cm.h>
  47. #include "cm_msgs.h"
  48. MODULE_AUTHOR("Sean Hefty");
  49. MODULE_DESCRIPTION("InfiniBand CM");
  50. MODULE_LICENSE("Dual BSD/GPL");
  51. static void cm_add_one(struct ib_device *device);
  52. static void cm_remove_one(struct ib_device *device);
  53. static struct ib_client cm_client = {
  54. .name = "cm",
  55. .add = cm_add_one,
  56. .remove = cm_remove_one
  57. };
  58. static struct ib_cm {
  59. spinlock_t lock;
  60. struct list_head device_list;
  61. rwlock_t device_lock;
  62. struct rb_root listen_service_table;
  63. u64 listen_service_id;
  64. /* struct rb_root peer_service_table; todo: fix peer to peer */
  65. struct rb_root remote_qp_table;
  66. struct rb_root remote_id_table;
  67. struct rb_root remote_sidr_table;
  68. struct idr local_id_table;
  69. struct workqueue_struct *wq;
  70. } cm;
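/*
 * A note on the lookup tables above, as used throughout this file:
 * listen_service_table maps (device, service_id/service_mask) to listening
 * cm_ids; remote_id_table and remote_qp_table hold timewait entries keyed by
 * (remote CA GUID, remote comm ID) and (remote CA GUID, remote QPN) so that
 * stale or duplicate REQs can be detected; remote_sidr_table tracks ids
 * created for received SIDR requests, keyed by remote comm ID and the
 * requester's GID; local_id_table is the idr that hands out local comm IDs.
 */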
  71. struct cm_port {
  72. struct cm_device *cm_dev;
  73. struct ib_mad_agent *mad_agent;
  74. u8 port_num;
  75. };
  76. struct cm_device {
  77. struct list_head list;
  78. struct ib_device *device;
  79. __be64 ca_guid;
  80. struct cm_port port[0];
  81. };
  82. struct cm_av {
  83. struct cm_port *port;
  84. union ib_gid dgid;
  85. struct ib_ah_attr ah_attr;
  86. u16 pkey_index;
  87. u8 packet_life_time;
  88. };
  89. struct cm_work {
  90. struct work_struct work;
  91. struct list_head list;
  92. struct cm_port *port;
  93. struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
  94. __be32 local_id; /* Established / timewait */
  95. __be32 remote_id;
  96. struct ib_cm_event cm_event;
  97. struct ib_sa_path_rec path[0];
  98. };
  99. struct cm_timewait_info {
  100. struct cm_work work; /* Must be first. */
  101. struct rb_node remote_qp_node;
  102. struct rb_node remote_id_node;
  103. __be64 remote_ca_guid;
  104. __be32 remote_qpn;
  105. u8 inserted_remote_qp;
  106. u8 inserted_remote_id;
  107. };
  108. struct cm_id_private {
  109. struct ib_cm_id id;
  110. struct rb_node service_node;
  111. struct rb_node sidr_id_node;
  112. spinlock_t lock;
  113. wait_queue_head_t wait;
  114. atomic_t refcount;
  115. struct ib_mad_send_buf *msg;
  116. struct cm_timewait_info *timewait_info;
  117. /* todo: use alternate port on send failure */
  118. struct cm_av av;
  119. struct cm_av alt_av;
  120. void *private_data;
  121. __be64 tid;
  122. __be32 local_qpn;
  123. __be32 remote_qpn;
  124. __be32 sq_psn;
  125. __be32 rq_psn;
  126. int timeout_ms;
  127. enum ib_mtu path_mtu;
  128. u8 private_data_len;
  129. u8 max_cm_retries;
  130. u8 peer_to_peer;
  131. u8 responder_resources;
  132. u8 initiator_depth;
  133. u8 local_ack_timeout;
  134. u8 retry_count;
  135. u8 rnr_retry_count;
  136. u8 service_timeout;
  137. struct list_head work_list;
  138. atomic_t work_count;
  139. };
  140. static void cm_work_handler(void *data);
  141. static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
  142. {
  143. if (atomic_dec_and_test(&cm_id_priv->refcount))
  144. wake_up(&cm_id_priv->wait);
  145. }
  146. static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
  147. struct ib_mad_send_buf **msg)
  148. {
  149. struct ib_mad_agent *mad_agent;
  150. struct ib_mad_send_buf *m;
  151. struct ib_ah *ah;
  152. mad_agent = cm_id_priv->av.port->mad_agent;
  153. ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
  154. if (IS_ERR(ah))
  155. return PTR_ERR(ah);
  156. m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
  157. cm_id_priv->av.pkey_index,
  158. ah, 0, sizeof(struct ib_mad_hdr),
  159. sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
  160. GFP_ATOMIC);
  161. if (IS_ERR(m)) {
  162. ib_destroy_ah(ah);
  163. return PTR_ERR(m);
  164. }
  165. /* Timeout set by caller if response is expected. */
  166. m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;
  167. atomic_inc(&cm_id_priv->refcount);
  168. m->context[0] = cm_id_priv;
  169. *msg = m;
  170. return 0;
  171. }
  172. static int cm_alloc_response_msg(struct cm_port *port,
  173. struct ib_mad_recv_wc *mad_recv_wc,
  174. struct ib_mad_send_buf **msg)
  175. {
  176. struct ib_mad_send_buf *m;
  177. struct ib_ah *ah;
  178. ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
  179. mad_recv_wc->recv_buf.grh, port->port_num);
  180. if (IS_ERR(ah))
  181. return PTR_ERR(ah);
  182. m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
  183. ah, 0, sizeof(struct ib_mad_hdr),
  184. sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
  185. GFP_ATOMIC);
  186. if (IS_ERR(m)) {
  187. ib_destroy_ah(ah);
  188. return PTR_ERR(m);
  189. }
  190. *msg = m;
  191. return 0;
  192. }
  193. static void cm_free_msg(struct ib_mad_send_buf *msg)
  194. {
  195. ib_destroy_ah(msg->send_wr.wr.ud.ah);
  196. if (msg->context[0])
  197. cm_deref_id(msg->context[0]);
  198. ib_free_send_mad(msg);
  199. }
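/*
 * cm_alloc_msg() takes a reference on the owning cm_id and stashes it in
 * context[0]; cm_free_msg() drops that reference.  Response messages built by
 * cm_alloc_response_msg() take no reference, so for those only the AH and the
 * MAD buffer are released (this relies on ib_create_send_mad() returning a
 * buffer whose context[0] is clear).
 */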
  200. static void * cm_copy_private_data(const void *private_data,
  201. u8 private_data_len)
  202. {
  203. void *data;
  204. if (!private_data || !private_data_len)
  205. return NULL;
  206. data = kmalloc(private_data_len, GFP_KERNEL);
  207. if (!data)
  208. return ERR_PTR(-ENOMEM);
  209. memcpy(data, private_data, private_data_len);
  210. return data;
  211. }
  212. static void cm_set_private_data(struct cm_id_private *cm_id_priv,
  213. void *private_data, u8 private_data_len)
  214. {
  215. if (cm_id_priv->private_data && cm_id_priv->private_data_len)
  216. kfree(cm_id_priv->private_data);
  217. cm_id_priv->private_data = private_data;
  218. cm_id_priv->private_data_len = private_data_len;
  219. }
  220. static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
  221. u16 dlid, u8 sl, u16 src_path_bits)
  222. {
  223. memset(ah_attr, 0, sizeof *ah_attr);
  224. ah_attr->dlid = dlid;
  225. ah_attr->sl = sl;
  226. ah_attr->src_path_bits = src_path_bits;
  227. ah_attr->port_num = port_num;
  228. }
  229. static void cm_init_av_for_response(struct cm_port *port,
  230. struct ib_wc *wc, struct cm_av *av)
  231. {
  232. av->port = port;
  233. av->pkey_index = wc->pkey_index;
  234. cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
  235. wc->sl, wc->dlid_path_bits);
  236. }
  237. static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
  238. {
  239. struct cm_device *cm_dev;
  240. struct cm_port *port = NULL;
  241. unsigned long flags;
  242. int ret;
  243. u8 p;
  244. read_lock_irqsave(&cm.device_lock, flags);
  245. list_for_each_entry(cm_dev, &cm.device_list, list) {
  246. if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
  247. &p, NULL)) {
  248. port = &cm_dev->port[p-1];
  249. break;
  250. }
  251. }
  252. read_unlock_irqrestore(&cm.device_lock, flags);
  253. if (!port)
  254. return -EINVAL;
  255. ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
  256. be16_to_cpu(path->pkey), &av->pkey_index);
  257. if (ret)
  258. return ret;
  259. av->port = port;
  260. cm_set_ah_attr(&av->ah_attr, av->port->port_num,
  261. be16_to_cpu(path->dlid), path->sl,
  262. be16_to_cpu(path->slid) & 0x7F);
  263. av->packet_life_time = path->packet_life_time;
  264. return 0;
  265. }
  266. static int cm_alloc_id(struct cm_id_private *cm_id_priv)
  267. {
  268. unsigned long flags;
  269. int ret;
  270. do {
  271. spin_lock_irqsave(&cm.lock, flags);
  272. ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
  273. (__force int *) &cm_id_priv->id.local_id);
  274. spin_unlock_irqrestore(&cm.lock, flags);
  275. } while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
  276. return ret;
  277. }
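/*
 * cm_alloc_id() follows the classic two-step idr idiom: idr_get_new_above()
 * fails with -EAGAIN when the idr has no preallocated memory, so the loop
 * calls idr_pre_get() (which may sleep with GFP_KERNEL) and retries.  The
 * allocation itself runs under cm.lock, while the preallocation does not.
 */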
  278. static void cm_free_id(__be32 local_id)
  279. {
  280. unsigned long flags;
  281. spin_lock_irqsave(&cm.lock, flags);
  282. idr_remove(&cm.local_id_table, (__force int) local_id);
  283. spin_unlock_irqrestore(&cm.lock, flags);
  284. }
  285. static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
  286. {
  287. struct cm_id_private *cm_id_priv;
  288. cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
  289. if (cm_id_priv) {
  290. if (cm_id_priv->id.remote_id == remote_id)
  291. atomic_inc(&cm_id_priv->refcount);
  292. else
  293. cm_id_priv = NULL;
  294. }
  295. return cm_id_priv;
  296. }
  297. static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
  298. {
  299. struct cm_id_private *cm_id_priv;
  300. unsigned long flags;
  301. spin_lock_irqsave(&cm.lock, flags);
  302. cm_id_priv = cm_get_id(local_id, remote_id);
  303. spin_unlock_irqrestore(&cm.lock, flags);
  304. return cm_id_priv;
  305. }
  306. static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
  307. {
  308. struct rb_node **link = &cm.listen_service_table.rb_node;
  309. struct rb_node *parent = NULL;
  310. struct cm_id_private *cur_cm_id_priv;
  311. __be64 service_id = cm_id_priv->id.service_id;
  312. __be64 service_mask = cm_id_priv->id.service_mask;
  313. while (*link) {
  314. parent = *link;
  315. cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
  316. service_node);
  317. if ((cur_cm_id_priv->id.service_mask & service_id) ==
  318. (service_mask & cur_cm_id_priv->id.service_id) &&
  319. (cm_id_priv->id.device == cur_cm_id_priv->id.device))
  320. return cur_cm_id_priv;
  321. if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
  322. link = &(*link)->rb_left;
  323. else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
  324. link = &(*link)->rb_right;
  325. else if (service_id < cur_cm_id_priv->id.service_id)
  326. link = &(*link)->rb_left;
  327. else
  328. link = &(*link)->rb_right;
  329. }
  330. rb_link_node(&cm_id_priv->service_node, parent, link);
  331. rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
  332. return NULL;
  333. }
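/*
 * The listen tree is ordered first by device and then by service_id, while
 * the match test honors each entry's service_mask.  Two listens collide when
 * their masked service IDs overlap on the same device, in which case the
 * existing entry is returned and the insert is refused.
 */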
  334. static struct cm_id_private * cm_find_listen(struct ib_device *device,
  335. __be64 service_id)
  336. {
  337. struct rb_node *node = cm.listen_service_table.rb_node;
  338. struct cm_id_private *cm_id_priv;
  339. while (node) {
  340. cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
  341. if ((cm_id_priv->id.service_mask & service_id) ==
  342. cm_id_priv->id.service_id &&
  343. (cm_id_priv->id.device == device))
  344. return cm_id_priv;
  345. if (device < cm_id_priv->id.device)
  346. node = node->rb_left;
  347. else if (device > cm_id_priv->id.device)
  348. node = node->rb_right;
  349. else if (service_id < cm_id_priv->id.service_id)
  350. node = node->rb_left;
  351. else
  352. node = node->rb_right;
  353. }
  354. return NULL;
  355. }
  356. static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
  357. *timewait_info)
  358. {
  359. struct rb_node **link = &cm.remote_id_table.rb_node;
  360. struct rb_node *parent = NULL;
  361. struct cm_timewait_info *cur_timewait_info;
  362. __be64 remote_ca_guid = timewait_info->remote_ca_guid;
  363. __be32 remote_id = timewait_info->work.remote_id;
  364. while (*link) {
  365. parent = *link;
  366. cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
  367. remote_id_node);
  368. if (remote_id < cur_timewait_info->work.remote_id)
  369. link = &(*link)->rb_left;
  370. else if (remote_id > cur_timewait_info->work.remote_id)
  371. link = &(*link)->rb_right;
  372. else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
  373. link = &(*link)->rb_left;
  374. else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
  375. link = &(*link)->rb_right;
  376. else
  377. return cur_timewait_info;
  378. }
  379. timewait_info->inserted_remote_id = 1;
  380. rb_link_node(&timewait_info->remote_id_node, parent, link);
  381. rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
  382. return NULL;
  383. }
  384. static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
  385. __be32 remote_id)
  386. {
  387. struct rb_node *node = cm.remote_id_table.rb_node;
  388. struct cm_timewait_info *timewait_info;
  389. while (node) {
  390. timewait_info = rb_entry(node, struct cm_timewait_info,
  391. remote_id_node);
  392. if (remote_id < timewait_info->work.remote_id)
  393. node = node->rb_left;
  394. else if (remote_id > timewait_info->work.remote_id)
  395. node = node->rb_right;
  396. else if (remote_ca_guid < timewait_info->remote_ca_guid)
  397. node = node->rb_left;
  398. else if (remote_ca_guid > timewait_info->remote_ca_guid)
  399. node = node->rb_right;
  400. else
  401. return timewait_info;
  402. }
  403. return NULL;
  404. }
  405. static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
  406. *timewait_info)
  407. {
  408. struct rb_node **link = &cm.remote_qp_table.rb_node;
  409. struct rb_node *parent = NULL;
  410. struct cm_timewait_info *cur_timewait_info;
  411. __be64 remote_ca_guid = timewait_info->remote_ca_guid;
  412. __be32 remote_qpn = timewait_info->remote_qpn;
  413. while (*link) {
  414. parent = *link;
  415. cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
  416. remote_qp_node);
  417. if (remote_qpn < cur_timewait_info->remote_qpn)
  418. link = &(*link)->rb_left;
  419. else if (remote_qpn > cur_timewait_info->remote_qpn)
  420. link = &(*link)->rb_right;
  421. else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
  422. link = &(*link)->rb_left;
  423. else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
  424. link = &(*link)->rb_right;
  425. else
  426. return cur_timewait_info;
  427. }
  428. timewait_info->inserted_remote_qp = 1;
  429. rb_link_node(&timewait_info->remote_qp_node, parent, link);
  430. rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
  431. return NULL;
  432. }
  433. static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
  434. *cm_id_priv)
  435. {
  436. struct rb_node **link = &cm.remote_sidr_table.rb_node;
  437. struct rb_node *parent = NULL;
  438. struct cm_id_private *cur_cm_id_priv;
  439. union ib_gid *port_gid = &cm_id_priv->av.dgid;
  440. __be32 remote_id = cm_id_priv->id.remote_id;
  441. while (*link) {
  442. parent = *link;
  443. cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
  444. sidr_id_node);
  445. if (remote_id < cur_cm_id_priv->id.remote_id)
  446. link = &(*link)->rb_left;
  447. else if (remote_id > cur_cm_id_priv->id.remote_id)
  448. link = &(*link)->rb_right;
  449. else {
  450. int cmp;
  451. cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
  452. sizeof *port_gid);
  453. if (cmp < 0)
  454. link = &(*link)->rb_left;
  455. else if (cmp > 0)
  456. link = &(*link)->rb_right;
  457. else
  458. return cur_cm_id_priv;
  459. }
  460. }
  461. rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
  462. rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
  463. return NULL;
  464. }
  465. static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
  466. enum ib_cm_sidr_status status)
  467. {
  468. struct ib_cm_sidr_rep_param param;
  469. memset(&param, 0, sizeof param);
  470. param.status = status;
  471. ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
  472. }
  473. struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  474. ib_cm_handler cm_handler,
  475. void *context)
  476. {
  477. struct cm_id_private *cm_id_priv;
  478. int ret;
  479. cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
  480. if (!cm_id_priv)
  481. return ERR_PTR(-ENOMEM);
  482. memset(cm_id_priv, 0, sizeof *cm_id_priv);
  483. cm_id_priv->id.state = IB_CM_IDLE;
  484. cm_id_priv->id.device = device;
  485. cm_id_priv->id.cm_handler = cm_handler;
  486. cm_id_priv->id.context = context;
  487. cm_id_priv->id.remote_cm_qpn = 1;
  488. ret = cm_alloc_id(cm_id_priv);
  489. if (ret)
  490. goto error;
  491. spin_lock_init(&cm_id_priv->lock);
  492. init_waitqueue_head(&cm_id_priv->wait);
  493. INIT_LIST_HEAD(&cm_id_priv->work_list);
  494. atomic_set(&cm_id_priv->work_count, -1);
  495. atomic_set(&cm_id_priv->refcount, 1);
  496. return &cm_id_priv->id;
  497. error:
  498. kfree(cm_id_priv);
  499. return ERR_PTR(-ENOMEM);
  500. }
  501. EXPORT_SYMBOL(ib_create_cm_id);
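/*
 * A minimal consumer-side sketch (my_cm_handler and my_ctx are placeholder
 * names, not part of this file), assuming the usual ib_cm_handler signature
 * from <rdma/ib_cm.h>:
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 struct ib_cm_event *event)
 *	{
 *		// a non-zero return from a handler invoked off the CM work
 *		// queue causes the CM to destroy the id (see cm_process_work)
 *		return 0;
 *	}
 *
 *	struct ib_cm_id *id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);
 */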
  502. static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
  503. {
  504. struct cm_work *work;
  505. if (list_empty(&cm_id_priv->work_list))
  506. return NULL;
  507. work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
  508. list_del(&work->list);
  509. return work;
  510. }
  511. static void cm_free_work(struct cm_work *work)
  512. {
  513. if (work->mad_recv_wc)
  514. ib_free_recv_mad(work->mad_recv_wc);
  515. kfree(work);
  516. }
  517. static inline int cm_convert_to_ms(int iba_time)
  518. {
  519. /* approximate conversion to ms from 4.096us x 2^iba_time */
  520. return 1 << max(iba_time - 8, 0);
  521. }
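/*
 * Worked example of the approximation above: the IBA encodes times as
 * 4.096us * 2^t.  Since 4.096us is roughly 1/244 ms (about 2^-8 ms), the
 * value in milliseconds is about 2^(t-8); e.g. t = 14 gives
 * 4.096us * 16384 = ~67ms, which this helper rounds to 1 << 6 = 64ms.
 */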
  522. static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
  523. {
  524. unsigned long flags;
  525. if (!timewait_info->inserted_remote_id &&
  526. !timewait_info->inserted_remote_qp)
  527. return;
  528. spin_lock_irqsave(&cm.lock, flags);
  529. if (timewait_info->inserted_remote_id) {
  530. rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
  531. timewait_info->inserted_remote_id = 0;
  532. }
  533. if (timewait_info->inserted_remote_qp) {
  534. rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
  535. timewait_info->inserted_remote_qp = 0;
  536. }
  537. spin_unlock_irqrestore(&cm.lock, flags);
  538. }
  539. static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
  540. {
  541. struct cm_timewait_info *timewait_info;
  542. timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
  543. if (!timewait_info)
  544. return ERR_PTR(-ENOMEM);
  545. memset(timewait_info, 0, sizeof *timewait_info);
  546. timewait_info->work.local_id = local_id;
  547. INIT_WORK(&timewait_info->work.work, cm_work_handler,
  548. &timewait_info->work);
  549. timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
  550. return timewait_info;
  551. }
  552. static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
  553. {
  554. int wait_time;
  555. /*
  556. * The cm_id could be destroyed by the user before we exit timewait.
  557. * To protect against this, we search for the cm_id after exiting
  558. * timewait before notifying the user that we've exited timewait.
  559. */
  560. cm_id_priv->id.state = IB_CM_TIMEWAIT;
  561. wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
  562. queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
  563. msecs_to_jiffies(wait_time));
  564. cm_id_priv->timewait_info = NULL;
  565. }
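/*
 * Queueing the delayed work hands ownership of timewait_info to the
 * workqueue: the pointer is cleared here so that later cleanup paths
 * (cm_reset_to_idle(), the error paths in ib_send_cm_req() and
 * cm_req_handler()) do not free a structure the timewait timer still owns.
 */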
  566. static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
  567. {
  568. cm_id_priv->id.state = IB_CM_IDLE;
  569. if (cm_id_priv->timewait_info) {
  570. cm_cleanup_timewait(cm_id_priv->timewait_info);
  571. kfree(cm_id_priv->timewait_info);
  572. cm_id_priv->timewait_info = NULL;
  573. }
  574. }
  575. void ib_destroy_cm_id(struct ib_cm_id *cm_id)
  576. {
  577. struct cm_id_private *cm_id_priv;
  578. struct cm_work *work;
  579. unsigned long flags;
  580. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  581. retest:
  582. spin_lock_irqsave(&cm_id_priv->lock, flags);
  583. switch (cm_id->state) {
  584. case IB_CM_LISTEN:
  585. cm_id->state = IB_CM_IDLE;
  586. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  587. spin_lock_irqsave(&cm.lock, flags);
  588. rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
  589. spin_unlock_irqrestore(&cm.lock, flags);
  590. break;
  591. case IB_CM_SIDR_REQ_SENT:
  592. cm_id->state = IB_CM_IDLE;
  593. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  594. (unsigned long) cm_id_priv->msg);
  595. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  596. break;
  597. case IB_CM_SIDR_REQ_RCVD:
  598. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  599. cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
  600. break;
  601. case IB_CM_REQ_SENT:
  602. case IB_CM_MRA_REQ_RCVD:
  603. case IB_CM_REP_SENT:
  604. case IB_CM_MRA_REP_RCVD:
  605. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  606. (unsigned long) cm_id_priv->msg);
  607. /* Fall through */
  608. case IB_CM_REQ_RCVD:
  609. case IB_CM_MRA_REQ_SENT:
  610. case IB_CM_REP_RCVD:
  611. case IB_CM_MRA_REP_SENT:
  612. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  613. ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
  614. &cm_id_priv->av.port->cm_dev->ca_guid,
  615. sizeof cm_id_priv->av.port->cm_dev->ca_guid,
  616. NULL, 0);
  617. break;
  618. case IB_CM_ESTABLISHED:
  619. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  620. ib_send_cm_dreq(cm_id, NULL, 0);
  621. goto retest;
  622. case IB_CM_DREQ_SENT:
  623. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  624. (unsigned long) cm_id_priv->msg);
  625. cm_enter_timewait(cm_id_priv);
  626. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  627. break;
  628. case IB_CM_DREQ_RCVD:
  629. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  630. ib_send_cm_drep(cm_id, NULL, 0);
  631. break;
  632. default:
  633. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  634. break;
  635. }
  636. cm_free_id(cm_id->local_id);
  637. atomic_dec(&cm_id_priv->refcount);
  638. wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
  639. while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
  640. cm_free_work(work);
  641. if (cm_id_priv->private_data && cm_id_priv->private_data_len)
  642. kfree(cm_id_priv->private_data);
  643. kfree(cm_id_priv);
  644. }
  645. EXPORT_SYMBOL(ib_destroy_cm_id);
  646. int ib_cm_listen(struct ib_cm_id *cm_id,
  647. __be64 service_id,
  648. __be64 service_mask)
  649. {
  650. struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
  651. unsigned long flags;
  652. int ret = 0;
  653. service_mask = service_mask ? service_mask :
  654. __constant_cpu_to_be64(~0ULL);
  655. service_id &= service_mask;
  656. if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
  657. (service_id != IB_CM_ASSIGN_SERVICE_ID))
  658. return -EINVAL;
  659. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  660. BUG_ON(cm_id->state != IB_CM_IDLE);
  661. cm_id->state = IB_CM_LISTEN;
  662. spin_lock_irqsave(&cm.lock, flags);
  663. if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
  664. cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
  665. cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
  666. } else {
  667. cm_id->service_id = service_id;
  668. cm_id->service_mask = service_mask;
  669. }
  670. cur_cm_id_priv = cm_insert_listen(cm_id_priv);
  671. spin_unlock_irqrestore(&cm.lock, flags);
  672. if (cur_cm_id_priv) {
  673. cm_id->state = IB_CM_IDLE;
  674. ret = -EBUSY;
  675. }
  676. return ret;
  677. }
  678. EXPORT_SYMBOL(ib_cm_listen);
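/*
 * Illustrative call (the service ID value is arbitrary): after creating a
 * cm_id, a server typically does
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0);
 *
 * passing a zero mask to request the default exact-match mask of ~0ULL.
 */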
  679. static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
  680. enum cm_msg_sequence msg_seq)
  681. {
  682. u64 hi_tid, low_tid;
  683. hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
  684. low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
  685. (msg_seq << 30));
  686. return cpu_to_be64(hi_tid | low_tid);
  687. }
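/*
 * Layout of the 64-bit transaction ID built above: the upper 32 bits are the
 * MAD agent's hi_tid; the lower 32 bits combine the raw local comm ID (an idr
 * value stored force-cast as __be32) with the message sequence in the top two
 * bits.  For example, an idr value of 0x123 with msg_seq 2 yields a low word
 * of 0x80000123 before the final cpu_to_be64().
 */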
  688. static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
  689. __be16 attr_id, __be64 tid)
  690. {
  691. hdr->base_version = IB_MGMT_BASE_VERSION;
  692. hdr->mgmt_class = IB_MGMT_CLASS_CM;
  693. hdr->class_version = IB_CM_CLASS_VERSION;
  694. hdr->method = IB_MGMT_METHOD_SEND;
  695. hdr->attr_id = attr_id;
  696. hdr->tid = tid;
  697. }
  698. static void cm_format_req(struct cm_req_msg *req_msg,
  699. struct cm_id_private *cm_id_priv,
  700. struct ib_cm_req_param *param)
  701. {
  702. cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
  703. cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
  704. req_msg->local_comm_id = cm_id_priv->id.local_id;
  705. req_msg->service_id = param->service_id;
  706. req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
  707. cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
  708. cm_req_set_resp_res(req_msg, param->responder_resources);
  709. cm_req_set_init_depth(req_msg, param->initiator_depth);
  710. cm_req_set_remote_resp_timeout(req_msg,
  711. param->remote_cm_response_timeout);
  712. cm_req_set_qp_type(req_msg, param->qp_type);
  713. cm_req_set_flow_ctrl(req_msg, param->flow_control);
  714. cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
  715. cm_req_set_local_resp_timeout(req_msg,
  716. param->local_cm_response_timeout);
  717. cm_req_set_retry_count(req_msg, param->retry_count);
  718. req_msg->pkey = param->primary_path->pkey;
  719. cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
  720. cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
  721. cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
  722. cm_req_set_srq(req_msg, param->srq);
  723. req_msg->primary_local_lid = param->primary_path->slid;
  724. req_msg->primary_remote_lid = param->primary_path->dlid;
  725. req_msg->primary_local_gid = param->primary_path->sgid;
  726. req_msg->primary_remote_gid = param->primary_path->dgid;
  727. cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
  728. cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
  729. req_msg->primary_traffic_class = param->primary_path->traffic_class;
  730. req_msg->primary_hop_limit = param->primary_path->hop_limit;
  731. cm_req_set_primary_sl(req_msg, param->primary_path->sl);
  732. cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
  733. cm_req_set_primary_local_ack_timeout(req_msg,
  734. min(31, param->primary_path->packet_life_time + 1));
  735. if (param->alternate_path) {
  736. req_msg->alt_local_lid = param->alternate_path->slid;
  737. req_msg->alt_remote_lid = param->alternate_path->dlid;
  738. req_msg->alt_local_gid = param->alternate_path->sgid;
  739. req_msg->alt_remote_gid = param->alternate_path->dgid;
  740. cm_req_set_alt_flow_label(req_msg,
  741. param->alternate_path->flow_label);
  742. cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
  743. req_msg->alt_traffic_class = param->alternate_path->traffic_class;
  744. req_msg->alt_hop_limit = param->alternate_path->hop_limit;
  745. cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
  746. cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
  747. cm_req_set_alt_local_ack_timeout(req_msg,
  748. min(31, param->alternate_path->packet_life_time + 1));
  749. }
  750. if (param->private_data && param->private_data_len)
  751. memcpy(req_msg->private_data, param->private_data,
  752. param->private_data_len);
  753. }
  754. static inline int cm_validate_req_param(struct ib_cm_req_param *param)
  755. {
  756. /* peer-to-peer not supported */
  757. if (param->peer_to_peer)
  758. return -EINVAL;
  759. if (!param->primary_path)
  760. return -EINVAL;
  761. if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
  762. return -EINVAL;
  763. if (param->private_data &&
  764. param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
  765. return -EINVAL;
  766. if (param->alternate_path &&
  767. (param->alternate_path->pkey != param->primary_path->pkey ||
  768. param->alternate_path->mtu != param->primary_path->mtu))
  769. return -EINVAL;
  770. return 0;
  771. }
  772. int ib_send_cm_req(struct ib_cm_id *cm_id,
  773. struct ib_cm_req_param *param)
  774. {
  775. struct cm_id_private *cm_id_priv;
  776. struct ib_send_wr *bad_send_wr;
  777. struct cm_req_msg *req_msg;
  778. unsigned long flags;
  779. int ret;
  780. ret = cm_validate_req_param(param);
  781. if (ret)
  782. return ret;
  783. /* Verify that we're not in timewait. */
  784. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  785. spin_lock_irqsave(&cm_id_priv->lock, flags);
  786. if (cm_id->state != IB_CM_IDLE) {
  787. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  788. ret = -EINVAL;
  789. goto out;
  790. }
  791. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  792. cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
  793. id.local_id);
  794. if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info);
  795. goto out; }
  796. ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
  797. if (ret)
  798. goto error1;
  799. if (param->alternate_path) {
  800. ret = cm_init_av_by_path(param->alternate_path,
  801. &cm_id_priv->alt_av);
  802. if (ret)
  803. goto error1;
  804. }
  805. cm_id->service_id = param->service_id;
  806. cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
  807. cm_id_priv->timeout_ms = cm_convert_to_ms(
  808. param->primary_path->packet_life_time) * 2 +
  809. cm_convert_to_ms(
  810. param->remote_cm_response_timeout);
  811. cm_id_priv->max_cm_retries = param->max_cm_retries;
  812. cm_id_priv->initiator_depth = param->initiator_depth;
  813. cm_id_priv->responder_resources = param->responder_resources;
  814. cm_id_priv->retry_count = param->retry_count;
  815. cm_id_priv->path_mtu = param->primary_path->mtu;
  816. ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
  817. if (ret)
  818. goto error1;
  819. req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
  820. cm_format_req(req_msg, cm_id_priv, param);
  821. cm_id_priv->tid = req_msg->hdr.tid;
  822. cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
  823. cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
  824. cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
  825. cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
  826. cm_id_priv->local_ack_timeout =
  827. cm_req_get_primary_local_ack_timeout(req_msg);
  828. spin_lock_irqsave(&cm_id_priv->lock, flags);
  829. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  830. &cm_id_priv->msg->send_wr, &bad_send_wr);
  831. if (ret) {
  832. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  833. goto error2;
  834. }
  835. BUG_ON(cm_id->state != IB_CM_IDLE);
  836. cm_id->state = IB_CM_REQ_SENT;
  837. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  838. return 0;
  839. error2: cm_free_msg(cm_id_priv->msg);
  840. error1: kfree(cm_id_priv->timewait_info);
  841. out: return ret;
  842. }
  843. EXPORT_SYMBOL(ib_send_cm_req);
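/*
 * The REQ retry timeout computed in ib_send_cm_req() is
 * 2 * packet_life_time + remote_cm_response_timeout, both converted with
 * cm_convert_to_ms(): one round trip on the wire plus the time the remote CM
 * may hold the request before replying or sending an MRA.
 */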
  844. static int cm_issue_rej(struct cm_port *port,
  845. struct ib_mad_recv_wc *mad_recv_wc,
  846. enum ib_cm_rej_reason reason,
  847. enum cm_msg_response msg_rejected,
  848. void *ari, u8 ari_length)
  849. {
  850. struct ib_mad_send_buf *msg = NULL;
  851. struct ib_send_wr *bad_send_wr;
  852. struct cm_rej_msg *rej_msg, *rcv_msg;
  853. int ret;
  854. ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
  855. if (ret)
  856. return ret;
  857. /* We just need common CM header information. Cast to any message. */
  858. rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
  859. rej_msg = (struct cm_rej_msg *) msg->mad;
  860. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
  861. rej_msg->remote_comm_id = rcv_msg->local_comm_id;
  862. rej_msg->local_comm_id = rcv_msg->remote_comm_id;
  863. cm_rej_set_msg_rejected(rej_msg, msg_rejected);
  864. rej_msg->reason = cpu_to_be16(reason);
  865. if (ari && ari_length) {
  866. cm_rej_set_reject_info_len(rej_msg, ari_length);
  867. memcpy(rej_msg->ari, ari, ari_length);
  868. }
  869. ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
  870. if (ret)
  871. cm_free_msg(msg);
  872. return ret;
  873. }
  874. static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
  875. __be32 local_qpn, __be32 remote_qpn)
  876. {
  877. return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
  878. ((local_ca_guid == remote_ca_guid) &&
  879. (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
  880. }
  881. static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
  882. struct ib_sa_path_rec *primary_path,
  883. struct ib_sa_path_rec *alt_path)
  884. {
  885. memset(primary_path, 0, sizeof *primary_path);
  886. primary_path->dgid = req_msg->primary_local_gid;
  887. primary_path->sgid = req_msg->primary_remote_gid;
  888. primary_path->dlid = req_msg->primary_local_lid;
  889. primary_path->slid = req_msg->primary_remote_lid;
  890. primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
  891. primary_path->hop_limit = req_msg->primary_hop_limit;
  892. primary_path->traffic_class = req_msg->primary_traffic_class;
  893. primary_path->reversible = 1;
  894. primary_path->pkey = req_msg->pkey;
  895. primary_path->sl = cm_req_get_primary_sl(req_msg);
  896. primary_path->mtu_selector = IB_SA_EQ;
  897. primary_path->mtu = cm_req_get_path_mtu(req_msg);
  898. primary_path->rate_selector = IB_SA_EQ;
  899. primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
  900. primary_path->packet_life_time_selector = IB_SA_EQ;
  901. primary_path->packet_life_time =
  902. cm_req_get_primary_local_ack_timeout(req_msg);
  903. primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
  904. if (req_msg->alt_local_lid) {
  905. memset(alt_path, 0, sizeof *alt_path);
  906. alt_path->dgid = req_msg->alt_local_gid;
  907. alt_path->sgid = req_msg->alt_remote_gid;
  908. alt_path->dlid = req_msg->alt_local_lid;
  909. alt_path->slid = req_msg->alt_remote_lid;
  910. alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
  911. alt_path->hop_limit = req_msg->alt_hop_limit;
  912. alt_path->traffic_class = req_msg->alt_traffic_class;
  913. alt_path->reversible = 1;
  914. alt_path->pkey = req_msg->pkey;
  915. alt_path->sl = cm_req_get_alt_sl(req_msg);
  916. alt_path->mtu_selector = IB_SA_EQ;
  917. alt_path->mtu = cm_req_get_path_mtu(req_msg);
  918. alt_path->rate_selector = IB_SA_EQ;
  919. alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
  920. alt_path->packet_life_time_selector = IB_SA_EQ;
  921. alt_path->packet_life_time =
  922. cm_req_get_alt_local_ack_timeout(req_msg);
  923. alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
  924. }
  925. }
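/*
 * The swap of local/remote fields above is intentional: the REQ describes the
 * path from the active side's point of view, so on the passive side the
 * sender's local LID/GID become our destination and vice versa.  The ack
 * timeout carried in the REQ is packet_life_time + 1, hence the decrement
 * when it is non-zero.
 */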
  926. static void cm_format_req_event(struct cm_work *work,
  927. struct cm_id_private *cm_id_priv,
  928. struct ib_cm_id *listen_id)
  929. {
  930. struct cm_req_msg *req_msg;
  931. struct ib_cm_req_event_param *param;
  932. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  933. param = &work->cm_event.param.req_rcvd;
  934. param->listen_id = listen_id;
  935. param->port = cm_id_priv->av.port->port_num;
  936. param->primary_path = &work->path[0];
  937. if (req_msg->alt_local_lid)
  938. param->alternate_path = &work->path[1];
  939. else
  940. param->alternate_path = NULL;
  941. param->remote_ca_guid = req_msg->local_ca_guid;
  942. param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
  943. param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
  944. param->qp_type = cm_req_get_qp_type(req_msg);
  945. param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
  946. param->responder_resources = cm_req_get_init_depth(req_msg);
  947. param->initiator_depth = cm_req_get_resp_res(req_msg);
  948. param->local_cm_response_timeout =
  949. cm_req_get_remote_resp_timeout(req_msg);
  950. param->flow_control = cm_req_get_flow_ctrl(req_msg);
  951. param->remote_cm_response_timeout =
  952. cm_req_get_local_resp_timeout(req_msg);
  953. param->retry_count = cm_req_get_retry_count(req_msg);
  954. param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
  955. param->srq = cm_req_get_srq(req_msg);
  956. work->cm_event.private_data = &req_msg->private_data;
  957. }
  958. static void cm_process_work(struct cm_id_private *cm_id_priv,
  959. struct cm_work *work)
  960. {
  961. unsigned long flags;
  962. int ret;
  963. /* We will typically only have the current event to report. */
  964. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
  965. cm_free_work(work);
  966. while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
  967. spin_lock_irqsave(&cm_id_priv->lock, flags);
  968. work = cm_dequeue_work(cm_id_priv);
  969. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  970. BUG_ON(!work);
  971. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
  972. &work->cm_event);
  973. cm_free_work(work);
  974. }
  975. cm_deref_id(cm_id_priv);
  976. if (ret)
  977. ib_destroy_cm_id(&cm_id_priv->id);
  978. }
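/*
 * work_count starts at -1 (see ib_create_cm_id) and is incremented for each
 * event queued for the id, so atomic_add_negative(-1, ...) returns false
 * exactly while queued work remains.  cm_process_work() therefore drains the
 * backlog for this cm_id in one pass and destroys the id if any handler call
 * returned non-zero.
 */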
  979. static void cm_format_mra(struct cm_mra_msg *mra_msg,
  980. struct cm_id_private *cm_id_priv,
  981. enum cm_msg_response msg_mraed, u8 service_timeout,
  982. const void *private_data, u8 private_data_len)
  983. {
  984. cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
  985. cm_mra_set_msg_mraed(mra_msg, msg_mraed);
  986. mra_msg->local_comm_id = cm_id_priv->id.local_id;
  987. mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
  988. cm_mra_set_service_timeout(mra_msg, service_timeout);
  989. if (private_data && private_data_len)
  990. memcpy(mra_msg->private_data, private_data, private_data_len);
  991. }
  992. static void cm_format_rej(struct cm_rej_msg *rej_msg,
  993. struct cm_id_private *cm_id_priv,
  994. enum ib_cm_rej_reason reason,
  995. void *ari,
  996. u8 ari_length,
  997. const void *private_data,
  998. u8 private_data_len)
  999. {
  1000. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
  1001. rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1002. switch(cm_id_priv->id.state) {
  1003. case IB_CM_REQ_RCVD:
  1004. rej_msg->local_comm_id = 0;
  1005. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1006. break;
  1007. case IB_CM_MRA_REQ_SENT:
  1008. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1009. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1010. break;
  1011. case IB_CM_REP_RCVD:
  1012. case IB_CM_MRA_REP_SENT:
  1013. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1014. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
  1015. break;
  1016. default:
  1017. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1018. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
  1019. break;
  1020. }
  1021. rej_msg->reason = cpu_to_be16(reason);
  1022. if (ari && ari_length) {
  1023. cm_rej_set_reject_info_len(rej_msg, ari_length);
  1024. memcpy(rej_msg->ari, ari, ari_length);
  1025. }
  1026. if (private_data && private_data_len)
  1027. memcpy(rej_msg->private_data, private_data, private_data_len);
  1028. }
  1029. static void cm_dup_req_handler(struct cm_work *work,
  1030. struct cm_id_private *cm_id_priv)
  1031. {
  1032. struct ib_mad_send_buf *msg = NULL;
  1033. struct ib_send_wr *bad_send_wr;
  1034. unsigned long flags;
  1035. int ret;
  1036. /* Quick state check to discard duplicate REQs. */
  1037. if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
  1038. return;
  1039. ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
  1040. if (ret)
  1041. return;
  1042. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1043. switch (cm_id_priv->id.state) {
  1044. case IB_CM_MRA_REQ_SENT:
  1045. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1046. CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
  1047. cm_id_priv->private_data,
  1048. cm_id_priv->private_data_len);
  1049. break;
  1050. case IB_CM_TIMEWAIT:
  1051. cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
  1052. IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
  1053. break;
  1054. default:
  1055. goto unlock;
  1056. }
  1057. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1058. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
  1059. &bad_send_wr);
  1060. if (ret)
  1061. goto free;
  1062. return;
  1063. unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1064. free: cm_free_msg(msg);
  1065. }
  1066. static struct cm_id_private * cm_match_req(struct cm_work *work,
  1067. struct cm_id_private *cm_id_priv)
  1068. {
  1069. struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
  1070. struct cm_timewait_info *timewait_info;
  1071. struct cm_req_msg *req_msg;
  1072. unsigned long flags;
  1073. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1074. /* Check for duplicate REQ and stale connections. */
  1075. spin_lock_irqsave(&cm.lock, flags);
  1076. timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
  1077. if (!timewait_info)
  1078. timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
  1079. if (timewait_info) {
  1080. cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
  1081. timewait_info->work.remote_id);
  1082. spin_unlock_irqrestore(&cm.lock, flags);
  1083. if (cur_cm_id_priv) {
  1084. cm_dup_req_handler(work, cur_cm_id_priv);
  1085. cm_deref_id(cur_cm_id_priv);
  1086. } else
  1087. cm_issue_rej(work->port, work->mad_recv_wc,
  1088. IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
  1089. NULL, 0);
  1090. goto error;
  1091. }
  1092. /* Find matching listen request. */
  1093. listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
  1094. req_msg->service_id);
  1095. if (!listen_cm_id_priv) {
  1096. spin_unlock_irqrestore(&cm.lock, flags);
  1097. cm_issue_rej(work->port, work->mad_recv_wc,
  1098. IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
  1099. NULL, 0);
  1100. goto error;
  1101. }
  1102. atomic_inc(&listen_cm_id_priv->refcount);
  1103. atomic_inc(&cm_id_priv->refcount);
  1104. cm_id_priv->id.state = IB_CM_REQ_RCVD;
  1105. atomic_inc(&cm_id_priv->work_count);
  1106. spin_unlock_irqrestore(&cm.lock, flags);
  1107. return listen_cm_id_priv;
  1108. error: cm_cleanup_timewait(cm_id_priv->timewait_info);
  1109. return NULL;
  1110. }
  1111. static int cm_req_handler(struct cm_work *work)
  1112. {
  1113. struct ib_cm_id *cm_id;
  1114. struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
  1115. struct cm_req_msg *req_msg;
  1116. int ret;
  1117. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1118. cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
  1119. if (IS_ERR(cm_id))
  1120. return PTR_ERR(cm_id);
  1121. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1122. cm_id_priv->id.remote_id = req_msg->local_comm_id;
  1123. cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
  1124. &cm_id_priv->av);
  1125. cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
  1126. id.local_id);
  1127. if (IS_ERR(cm_id_priv->timewait_info)) {
  1128. ret = PTR_ERR(cm_id_priv->timewait_info);
  1129. goto error1;
  1130. }
  1131. cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
  1132. cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
  1133. cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
  1134. listen_cm_id_priv = cm_match_req(work, cm_id_priv);
  1135. if (!listen_cm_id_priv) {
  1136. ret = -EINVAL;
  1137. goto error2;
  1138. }
  1139. cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
  1140. cm_id_priv->id.context = listen_cm_id_priv->id.context;
  1141. cm_id_priv->id.service_id = req_msg->service_id;
  1142. cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
  1143. cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
  1144. ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
  1145. if (ret)
  1146. goto error3;
  1147. if (req_msg->alt_local_lid) {
  1148. ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
  1149. if (ret)
  1150. goto error3;
  1151. }
  1152. cm_id_priv->tid = req_msg->hdr.tid;
  1153. cm_id_priv->timeout_ms = cm_convert_to_ms(
  1154. cm_req_get_local_resp_timeout(req_msg));
  1155. cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
  1156. cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
  1157. cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
  1158. cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
  1159. cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
  1160. cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
  1161. cm_id_priv->local_ack_timeout =
  1162. cm_req_get_primary_local_ack_timeout(req_msg);
  1163. cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
  1164. cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
  1165. cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
  1166. cm_process_work(cm_id_priv, work);
  1167. cm_deref_id(listen_cm_id_priv);
  1168. return 0;
  1169. error3: atomic_dec(&cm_id_priv->refcount);
  1170. cm_deref_id(listen_cm_id_priv);
  1171. cm_cleanup_timewait(cm_id_priv->timewait_info);
  1172. error2: kfree(cm_id_priv->timewait_info);
  1173. cm_id_priv->timewait_info = NULL;
  1174. error1: ib_destroy_cm_id(&cm_id_priv->id);
  1175. return ret;
  1176. }
  1177. static void cm_format_rep(struct cm_rep_msg *rep_msg,
  1178. struct cm_id_private *cm_id_priv,
  1179. struct ib_cm_rep_param *param)
  1180. {
  1181. cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
  1182. rep_msg->local_comm_id = cm_id_priv->id.local_id;
  1183. rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1184. cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
  1185. cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
  1186. rep_msg->resp_resources = param->responder_resources;
  1187. rep_msg->initiator_depth = param->initiator_depth;
  1188. cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
  1189. cm_rep_set_failover(rep_msg, param->failover_accepted);
  1190. cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
  1191. cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
  1192. cm_rep_set_srq(rep_msg, param->srq);
  1193. rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
  1194. if (param->private_data && param->private_data_len)
  1195. memcpy(rep_msg->private_data, param->private_data,
  1196. param->private_data_len);
  1197. }
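/*
 * ib_send_cm_rep() - send a REP for a received REQ.  Only valid in
 * IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT; on success the id moves to
 * IB_CM_REP_SENT and the MAD is retried until an RTU/REJ or a timeout.
 *
 * A minimal caller sketch (the QP and PSN variables and the numeric
 * values are illustrative only, not taken from this file):
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= my_qp->qp_num,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.flow_control		= 1,
 *		.rnr_retry_count	= 7,
 *	};
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */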
  1198. int ib_send_cm_rep(struct ib_cm_id *cm_id,
  1199. struct ib_cm_rep_param *param)
  1200. {
  1201. struct cm_id_private *cm_id_priv;
  1202. struct ib_mad_send_buf *msg;
  1203. struct cm_rep_msg *rep_msg;
  1204. struct ib_send_wr *bad_send_wr;
  1205. unsigned long flags;
  1206. int ret;
  1207. if (param->private_data &&
  1208. param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
  1209. return -EINVAL;
  1210. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1211. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1212. if (cm_id->state != IB_CM_REQ_RCVD &&
  1213. cm_id->state != IB_CM_MRA_REQ_SENT) {
  1214. ret = -EINVAL;
  1215. goto out;
  1216. }
  1217. ret = cm_alloc_msg(cm_id_priv, &msg);
  1218. if (ret)
  1219. goto out;
  1220. rep_msg = (struct cm_rep_msg *) msg->mad;
  1221. cm_format_rep(rep_msg, cm_id_priv, param);
  1222. msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
  1223. msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
  1224. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1225. &msg->send_wr, &bad_send_wr);
  1226. if (ret) {
  1227. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1228. cm_free_msg(msg);
  1229. return ret;
  1230. }
  1231. cm_id->state = IB_CM_REP_SENT;
  1232. cm_id_priv->msg = msg;
  1233. cm_id_priv->initiator_depth = param->initiator_depth;
  1234. cm_id_priv->responder_resources = param->responder_resources;
  1235. cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
  1236. cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
  1237. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1238. return ret;
  1239. }
  1240. EXPORT_SYMBOL(ib_send_cm_rep);
  1241. static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
  1242. struct cm_id_private *cm_id_priv,
  1243. const void *private_data,
  1244. u8 private_data_len)
  1245. {
  1246. cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
  1247. rtu_msg->local_comm_id = cm_id_priv->id.local_id;
  1248. rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1249. if (private_data && private_data_len)
  1250. memcpy(rtu_msg->private_data, private_data, private_data_len);
  1251. }
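/*
 * ib_send_cm_rtu() - complete connection establishment from the active
 * side.  Only valid in IB_CM_REP_RCVD or IB_CM_MRA_REP_SENT; the
 * private data is retained so duplicate REPs can be re-acknowledged,
 * and the id moves to IB_CM_ESTABLISHED.
 */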
  1252. int ib_send_cm_rtu(struct ib_cm_id *cm_id,
  1253. const void *private_data,
  1254. u8 private_data_len)
  1255. {
  1256. struct cm_id_private *cm_id_priv;
  1257. struct ib_mad_send_buf *msg;
  1258. struct ib_send_wr *bad_send_wr;
  1259. unsigned long flags;
  1260. void *data;
  1261. int ret;
  1262. if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
  1263. return -EINVAL;
  1264. data = cm_copy_private_data(private_data, private_data_len);
  1265. if (IS_ERR(data))
  1266. return PTR_ERR(data);
  1267. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1268. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1269. if (cm_id->state != IB_CM_REP_RCVD &&
  1270. cm_id->state != IB_CM_MRA_REP_SENT) {
  1271. ret = -EINVAL;
  1272. goto error;
  1273. }
  1274. ret = cm_alloc_msg(cm_id_priv, &msg);
  1275. if (ret)
  1276. goto error;
  1277. cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
  1278. private_data, private_data_len);
  1279. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1280. &msg->send_wr, &bad_send_wr);
  1281. if (ret) {
  1282. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1283. cm_free_msg(msg);
  1284. kfree(data);
  1285. return ret;
  1286. }
  1287. cm_id->state = IB_CM_ESTABLISHED;
  1288. cm_set_private_data(cm_id_priv, data, private_data_len);
  1289. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1290. return 0;
  1291. error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1292. kfree(data);
  1293. return ret;
  1294. }
  1295. EXPORT_SYMBOL(ib_send_cm_rtu);
  1296. static void cm_format_rep_event(struct cm_work *work)
  1297. {
  1298. struct cm_rep_msg *rep_msg;
  1299. struct ib_cm_rep_event_param *param;
  1300. rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
  1301. param = &work->cm_event.param.rep_rcvd;
  1302. param->remote_ca_guid = rep_msg->local_ca_guid;
  1303. param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
  1304. param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
  1305. param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
  1306. param->responder_resources = rep_msg->initiator_depth;
  1307. param->initiator_depth = rep_msg->resp_resources;
  1308. param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
  1309. param->failover_accepted = cm_rep_get_failover(rep_msg);
  1310. param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
  1311. param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
  1312. param->srq = cm_rep_get_srq(rep_msg);
  1313. work->cm_event.private_data = &rep_msg->private_data;
  1314. }
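/*
 * A retransmitted REP arrived for a connection that has already moved
 * on: re-send the RTU if we are established, re-send the MRA if the
 * REP was MRA'd, otherwise drop it silently.
 */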
  1315. static void cm_dup_rep_handler(struct cm_work *work)
  1316. {
  1317. struct cm_id_private *cm_id_priv;
  1318. struct cm_rep_msg *rep_msg;
  1319. struct ib_mad_send_buf *msg = NULL;
  1320. struct ib_send_wr *bad_send_wr;
  1321. unsigned long flags;
  1322. int ret;
  1323. rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
  1324. cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
  1325. rep_msg->local_comm_id);
  1326. if (!cm_id_priv)
  1327. return;
  1328. ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
  1329. if (ret)
  1330. goto deref;
  1331. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1332. if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
  1333. cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
  1334. cm_id_priv->private_data,
  1335. cm_id_priv->private_data_len);
  1336. else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
  1337. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1338. CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
  1339. cm_id_priv->private_data,
  1340. cm_id_priv->private_data_len);
  1341. else
  1342. goto unlock;
  1343. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1344. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
  1345. &bad_send_wr);
  1346. if (ret)
  1347. goto free;
  1348. goto deref;
  1349. unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1350. free: cm_free_msg(msg);
  1351. deref: cm_deref_id(cm_id_priv);
  1352. }
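/*
 * Handle a received REP on the active side: filter duplicate REPs and
 * stale connections through the remote-id/qpn trees, record the remote
 * QPN and negotiated parameters, cancel the REQ that is still being
 * retried, move the id to IB_CM_REP_RCVD and deliver
 * IB_CM_REP_RECEIVED.
 */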
  1353. static int cm_rep_handler(struct cm_work *work)
  1354. {
  1355. struct cm_id_private *cm_id_priv;
  1356. struct cm_rep_msg *rep_msg;
  1357. unsigned long flags;
  1358. int ret;
  1359. rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
  1360. cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
  1361. if (!cm_id_priv) {
  1362. cm_dup_rep_handler(work);
  1363. return -EINVAL;
  1364. }
  1365. cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
  1366. cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
  1367. cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
  1368. spin_lock_irqsave(&cm.lock, flags);
  1369. /* Check for duplicate REP. */
  1370. if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
  1371. spin_unlock_irqrestore(&cm.lock, flags);
  1372. ret = -EINVAL;
  1373. goto error;
  1374. }
  1375. /* Check for a stale connection. */
  1376. if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
  1377. spin_unlock_irqrestore(&cm.lock, flags);
  1378. cm_issue_rej(work->port, work->mad_recv_wc,
  1379. IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
  1380. NULL, 0);
  1381. ret = -EINVAL;
  1382. goto error;
  1383. }
  1384. spin_unlock_irqrestore(&cm.lock, flags);
  1385. cm_format_rep_event(work);
  1386. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1387. switch (cm_id_priv->id.state) {
  1388. case IB_CM_REQ_SENT:
  1389. case IB_CM_MRA_REQ_RCVD:
  1390. break;
  1391. default:
  1392. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1393. ret = -EINVAL;
  1394. goto error;
  1395. }
  1396. cm_id_priv->id.state = IB_CM_REP_RCVD;
  1397. cm_id_priv->id.remote_id = rep_msg->local_comm_id;
  1398. cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
  1399. cm_id_priv->initiator_depth = rep_msg->resp_resources;
  1400. cm_id_priv->responder_resources = rep_msg->initiator_depth;
  1401. cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
  1402. cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
  1403. /* todo: handle peer_to_peer */
  1404. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1405. (unsigned long) cm_id_priv->msg);
  1406. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1407. if (!ret)
  1408. list_add_tail(&work->list, &cm_id_priv->work_list);
  1409. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1410. if (ret)
  1411. cm_process_work(cm_id_priv, work);
  1412. else
  1413. cm_deref_id(cm_id_priv);
  1414. return 0;
  1415. error: cm_cleanup_timewait(cm_id_priv->timewait_info);
  1416. cm_deref_id(cm_id_priv);
  1417. return ret;
  1418. }
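/* Process an IB_CM_USER_ESTABLISHED work item queued by ib_cm_establish(). */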
  1419. static int cm_establish_handler(struct cm_work *work)
  1420. {
  1421. struct cm_id_private *cm_id_priv;
  1422. unsigned long flags;
  1423. int ret;
  1424. /* See comment in ib_cm_establish about lookup. */
  1425. cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
  1426. if (!cm_id_priv)
  1427. return -EINVAL;
  1428. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1429. if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
  1430. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1431. goto out;
  1432. }
  1433. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1434. (unsigned long) cm_id_priv->msg);
  1435. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1436. if (!ret)
  1437. list_add_tail(&work->list, &cm_id_priv->work_list);
  1438. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1439. if (ret)
  1440. cm_process_work(cm_id_priv, work);
  1441. else
  1442. cm_deref_id(cm_id_priv);
  1443. return 0;
  1444. out:
  1445. cm_deref_id(cm_id_priv);
  1446. return -EINVAL;
  1447. }
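/*
 * Handle a received RTU on the passive side: only valid in
 * IB_CM_REP_SENT or IB_CM_MRA_REP_RCVD; cancel the retried REP and move
 * the id to IB_CM_ESTABLISHED.
 */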
  1448. static int cm_rtu_handler(struct cm_work *work)
  1449. {
  1450. struct cm_id_private *cm_id_priv;
  1451. struct cm_rtu_msg *rtu_msg;
  1452. unsigned long flags;
  1453. int ret;
  1454. rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
  1455. cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
  1456. rtu_msg->local_comm_id);
  1457. if (!cm_id_priv)
  1458. return -EINVAL;
  1459. work->cm_event.private_data = &rtu_msg->private_data;
  1460. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1461. if (cm_id_priv->id.state != IB_CM_REP_SENT &&
  1462. cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
  1463. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1464. goto out;
  1465. }
  1466. cm_id_priv->id.state = IB_CM_ESTABLISHED;
  1467. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1468. (unsigned long) cm_id_priv->msg);
  1469. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1470. if (!ret)
  1471. list_add_tail(&work->list, &cm_id_priv->work_list);
  1472. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1473. if (ret)
  1474. cm_process_work(cm_id_priv, work);
  1475. else
  1476. cm_deref_id(cm_id_priv);
  1477. return 0;
  1478. out:
  1479. cm_deref_id(cm_id_priv);
  1480. return -EINVAL;
  1481. }
  1482. static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
  1483. struct cm_id_private *cm_id_priv,
  1484. const void *private_data,
  1485. u8 private_data_len)
  1486. {
  1487. cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
  1488. cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
  1489. dreq_msg->local_comm_id = cm_id_priv->id.local_id;
  1490. dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1491. cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
  1492. if (private_data && private_data_len)
  1493. memcpy(dreq_msg->private_data, private_data, private_data_len);
  1494. }
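/*
 * ib_send_cm_dreq() - start connection teardown.  Only valid while
 * IB_CM_ESTABLISHED; if the DREQ cannot be allocated or posted the id
 * drops straight into timewait, otherwise it moves to IB_CM_DREQ_SENT
 * until the DREP (or a timeout) arrives.
 */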
  1495. int ib_send_cm_dreq(struct ib_cm_id *cm_id,
  1496. const void *private_data,
  1497. u8 private_data_len)
  1498. {
  1499. struct cm_id_private *cm_id_priv;
  1500. struct ib_mad_send_buf *msg;
  1501. struct ib_send_wr *bad_send_wr;
  1502. unsigned long flags;
  1503. int ret;
  1504. if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
  1505. return -EINVAL;
  1506. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1507. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1508. if (cm_id->state != IB_CM_ESTABLISHED) {
  1509. ret = -EINVAL;
  1510. goto out;
  1511. }
  1512. ret = cm_alloc_msg(cm_id_priv, &msg);
  1513. if (ret) {
  1514. cm_enter_timewait(cm_id_priv);
  1515. goto out;
  1516. }
  1517. cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
  1518. private_data, private_data_len);
  1519. msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
  1520. msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
  1521. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1522. &msg->send_wr, &bad_send_wr);
  1523. if (ret) {
  1524. cm_enter_timewait(cm_id_priv);
  1525. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1526. cm_free_msg(msg);
  1527. return ret;
  1528. }
  1529. cm_id->state = IB_CM_DREQ_SENT;
  1530. cm_id_priv->msg = msg;
  1531. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1532. return ret;
  1533. }
  1534. EXPORT_SYMBOL(ib_send_cm_dreq);
  1535. static void cm_format_drep(struct cm_drep_msg *drep_msg,
  1536. struct cm_id_private *cm_id_priv,
  1537. const void *private_data,
  1538. u8 private_data_len)
  1539. {
  1540. cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
  1541. drep_msg->local_comm_id = cm_id_priv->id.local_id;
  1542. drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1543. if (private_data && private_data_len)
  1544. memcpy(drep_msg->private_data, private_data, private_data_len);
  1545. }
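/*
 * ib_send_cm_drep() - acknowledge a DREQ.  Only valid in
 * IB_CM_DREQ_RCVD; the id enters timewait before the DREP is posted, so
 * teardown completes locally even if the reply cannot be sent.
 */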
  1546. int ib_send_cm_drep(struct ib_cm_id *cm_id,
  1547. const void *private_data,
  1548. u8 private_data_len)
  1549. {
  1550. struct cm_id_private *cm_id_priv;
  1551. struct ib_mad_send_buf *msg;
  1552. struct ib_send_wr *bad_send_wr;
  1553. unsigned long flags;
  1554. void *data;
  1555. int ret;
  1556. if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
  1557. return -EINVAL;
  1558. data = cm_copy_private_data(private_data, private_data_len);
  1559. if (IS_ERR(data))
  1560. return PTR_ERR(data);
  1561. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1562. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1563. if (cm_id->state != IB_CM_DREQ_RCVD) {
  1564. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1565. kfree(data);
  1566. return -EINVAL;
  1567. }
  1568. cm_set_private_data(cm_id_priv, data, private_data_len);
  1569. cm_enter_timewait(cm_id_priv);
  1570. ret = cm_alloc_msg(cm_id_priv, &msg);
  1571. if (ret)
  1572. goto out;
  1573. cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
  1574. private_data, private_data_len);
  1575. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
  1576. &bad_send_wr);
  1577. if (ret) {
  1578. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1579. cm_free_msg(msg);
  1580. return ret;
  1581. }
  1582. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1583. return ret;
  1584. }
  1585. EXPORT_SYMBOL(ib_send_cm_drep);
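/*
 * Handle a received DREQ: after checking that it addresses our QP,
 * cancel any REP/DREQ still being retried, or simply re-send the DREP
 * if we are already in timewait; otherwise move the id to
 * IB_CM_DREQ_RCVD and deliver IB_CM_DREQ_RECEIVED.
 */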
  1586. static int cm_dreq_handler(struct cm_work *work)
  1587. {
  1588. struct cm_id_private *cm_id_priv;
  1589. struct cm_dreq_msg *dreq_msg;
  1590. struct ib_mad_send_buf *msg = NULL;
  1591. struct ib_send_wr *bad_send_wr;
  1592. unsigned long flags;
  1593. int ret;
  1594. dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
  1595. cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
  1596. dreq_msg->local_comm_id);
  1597. if (!cm_id_priv)
  1598. return -EINVAL;
  1599. work->cm_event.private_data = &dreq_msg->private_data;
  1600. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1601. if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
  1602. goto unlock;
  1603. switch (cm_id_priv->id.state) {
  1604. case IB_CM_REP_SENT:
  1605. case IB_CM_DREQ_SENT:
  1606. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1607. (unsigned long) cm_id_priv->msg);
  1608. break;
  1609. case IB_CM_ESTABLISHED:
  1610. case IB_CM_MRA_REP_RCVD:
  1611. break;
  1612. case IB_CM_TIMEWAIT:
  1613. if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
  1614. goto unlock;
  1615. cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
  1616. cm_id_priv->private_data,
  1617. cm_id_priv->private_data_len);
  1618. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1619. if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1620. &msg->send_wr, &bad_send_wr))
  1621. cm_free_msg(msg);
  1622. goto deref;
  1623. default:
  1624. goto unlock;
  1625. }
  1626. cm_id_priv->id.state = IB_CM_DREQ_RCVD;
  1627. cm_id_priv->tid = dreq_msg->hdr.tid;
  1628. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1629. if (!ret)
  1630. list_add_tail(&work->list, &cm_id_priv->work_list);
  1631. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1632. if (ret)
  1633. cm_process_work(cm_id_priv, work);
  1634. else
  1635. cm_deref_id(cm_id_priv);
  1636. return 0;
  1637. unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1638. deref: cm_deref_id(cm_id_priv);
  1639. return -EINVAL;
  1640. }
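/*
 * Handle a received DREP: only valid in IB_CM_DREQ_SENT or
 * IB_CM_DREQ_RCVD; cancel the retried DREQ, enter timewait and deliver
 * IB_CM_DREP_RECEIVED.
 */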
  1641. static int cm_drep_handler(struct cm_work *work)
  1642. {
  1643. struct cm_id_private *cm_id_priv;
  1644. struct cm_drep_msg *drep_msg;
  1645. unsigned long flags;
  1646. int ret;
  1647. drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
  1648. cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
  1649. drep_msg->local_comm_id);
  1650. if (!cm_id_priv)
  1651. return -EINVAL;
  1652. work->cm_event.private_data = &drep_msg->private_data;
  1653. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1654. if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
  1655. cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
  1656. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1657. goto out;
  1658. }
  1659. cm_enter_timewait(cm_id_priv);
  1660. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1661. (unsigned long) cm_id_priv->msg);
  1662. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1663. if (!ret)
  1664. list_add_tail(&work->list, &cm_id_priv->work_list);
  1665. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1666. if (ret)
  1667. cm_process_work(cm_id_priv, work);
  1668. else
  1669. cm_deref_id(cm_id_priv);
  1670. return 0;
  1671. out:
  1672. cm_deref_id(cm_id_priv);
  1673. return -EINVAL;
  1674. }
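/*
 * ib_send_cm_rej() - reject the connection from any state prior to
 * establishment.  Before our REP has gone out the id is reset to idle;
 * once a REP has been sent it enters timewait instead.
 */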
  1675. int ib_send_cm_rej(struct ib_cm_id *cm_id,
  1676. enum ib_cm_rej_reason reason,
  1677. void *ari,
  1678. u8 ari_length,
  1679. const void *private_data,
  1680. u8 private_data_len)
  1681. {
  1682. struct cm_id_private *cm_id_priv;
  1683. struct ib_mad_send_buf *msg;
  1684. struct ib_send_wr *bad_send_wr;
  1685. unsigned long flags;
  1686. int ret;
  1687. if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
  1688. (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
  1689. return -EINVAL;
  1690. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1691. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1692. switch (cm_id->state) {
  1693. case IB_CM_REQ_SENT:
  1694. case IB_CM_MRA_REQ_RCVD:
  1695. case IB_CM_REQ_RCVD:
  1696. case IB_CM_MRA_REQ_SENT:
  1697. case IB_CM_REP_RCVD:
  1698. case IB_CM_MRA_REP_SENT:
  1699. ret = cm_alloc_msg(cm_id_priv, &msg);
  1700. if (!ret)
  1701. cm_format_rej((struct cm_rej_msg *) msg->mad,
  1702. cm_id_priv, reason, ari, ari_length,
  1703. private_data, private_data_len);
  1704. cm_reset_to_idle(cm_id_priv);
  1705. break;
  1706. case IB_CM_REP_SENT:
  1707. case IB_CM_MRA_REP_RCVD:
  1708. ret = cm_alloc_msg(cm_id_priv, &msg);
  1709. if (!ret)
  1710. cm_format_rej((struct cm_rej_msg *) msg->mad,
  1711. cm_id_priv, reason, ari, ari_length,
  1712. private_data, private_data_len);
  1713. cm_enter_timewait(cm_id_priv);
  1714. break;
  1715. default:
  1716. ret = -EINVAL;
  1717. goto out;
  1718. }
  1719. if (ret)
  1720. goto out;
  1721. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1722. &msg->send_wr, &bad_send_wr);
  1723. if (ret)
  1724. cm_free_msg(msg);
  1725. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1726. return ret;
  1727. }
  1728. EXPORT_SYMBOL(ib_send_cm_rej);
  1729. static void cm_format_rej_event(struct cm_work *work)
  1730. {
  1731. struct cm_rej_msg *rej_msg;
  1732. struct ib_cm_rej_event_param *param;
  1733. rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
  1734. param = &work->cm_event.param.rej_rcvd;
  1735. param->ari = rej_msg->ari;
  1736. param->ari_length = cm_rej_get_reject_info_len(rej_msg);
  1737. param->reason = __be16_to_cpu(rej_msg->reason);
  1738. work->cm_event.private_data = &rej_msg->private_data;
  1739. }
  1740. static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
  1741. {
  1742. struct cm_timewait_info *timewait_info;
  1743. struct cm_id_private *cm_id_priv;
  1744. unsigned long flags;
  1745. __be32 remote_id;
  1746. remote_id = rej_msg->local_comm_id;
  1747. if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
  1748. spin_lock_irqsave(&cm.lock, flags);
  1749. timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
  1750. remote_id);
  1751. if (!timewait_info) {
  1752. spin_unlock_irqrestore(&cm.lock, flags);
  1753. return NULL;
  1754. }
  1755. cm_id_priv = idr_find(&cm.local_id_table,
  1756. (__force int) timewait_info->work.local_id);
  1757. if (cm_id_priv) {
  1758. if (cm_id_priv->id.remote_id == remote_id)
  1759. atomic_inc(&cm_id_priv->refcount);
  1760. else
  1761. cm_id_priv = NULL;
  1762. }
  1763. spin_unlock_irqrestore(&cm.lock, flags);
  1764. } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
  1765. cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
  1766. else
  1767. cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
  1768. return cm_id_priv;
  1769. }
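/*
 * Handle a received REJ: cancel whatever MAD is still being retried
 * for the rejected exchange, then move the id to idle or to timewait
 * depending on how far establishment had progressed and on whether the
 * peer flagged a stale connection, and deliver IB_CM_REJ_RECEIVED.
 */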
  1770. static int cm_rej_handler(struct cm_work *work)
  1771. {
  1772. struct cm_id_private *cm_id_priv;
  1773. struct cm_rej_msg *rej_msg;
  1774. unsigned long flags;
  1775. int ret;
  1776. rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
  1777. cm_id_priv = cm_acquire_rejected_id(rej_msg);
  1778. if (!cm_id_priv)
  1779. return -EINVAL;
  1780. cm_format_rej_event(work);
  1781. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1782. switch (cm_id_priv->id.state) {
  1783. case IB_CM_REQ_SENT:
  1784. case IB_CM_MRA_REQ_RCVD:
  1785. case IB_CM_REP_SENT:
  1786. case IB_CM_MRA_REP_RCVD:
  1787. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1788. (unsigned long) cm_id_priv->msg);
  1789. /* fall through */
  1790. case IB_CM_REQ_RCVD:
  1791. case IB_CM_MRA_REQ_SENT:
  1792. if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
  1793. cm_enter_timewait(cm_id_priv);
  1794. else
  1795. cm_reset_to_idle(cm_id_priv);
  1796. break;
  1797. case IB_CM_DREQ_SENT:
  1798. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  1799. (unsigned long) cm_id_priv->msg);
  1800. /* fall through */
  1801. case IB_CM_REP_RCVD:
  1802. case IB_CM_MRA_REP_SENT:
  1803. case IB_CM_ESTABLISHED:
  1804. cm_enter_timewait(cm_id_priv);
  1805. break;
  1806. default:
  1807. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1808. ret = -EINVAL;
  1809. goto out;
  1810. }
  1811. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1812. if (!ret)
  1813. list_add_tail(&work->list, &cm_id_priv->work_list);
  1814. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1815. if (ret)
  1816. cm_process_work(cm_id_priv, work);
  1817. else
  1818. cm_deref_id(cm_id_priv);
  1819. return 0;
  1820. out:
  1821. cm_deref_id(cm_id_priv);
  1822. return -EINVAL;
  1823. }
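/*
 * ib_send_cm_mra() - ask the peer to keep waiting.  Sends an MRA for
 * the message implied by the current state (REQ, REP, or LAP while
 * established) and records service_timeout plus the private data for
 * later duplicate replies.  E.g. from a REQ handler that needs more
 * time (the timeout code 16 below is purely illustrative):
 *
 *	ib_send_cm_mra(cm_id, 16, NULL, 0);
 */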
  1824. int ib_send_cm_mra(struct ib_cm_id *cm_id,
  1825. u8 service_timeout,
  1826. const void *private_data,
  1827. u8 private_data_len)
  1828. {
  1829. struct cm_id_private *cm_id_priv;
  1830. struct ib_mad_send_buf *msg;
  1831. struct ib_send_wr *bad_send_wr;
  1832. void *data;
  1833. unsigned long flags;
  1834. int ret;
  1835. if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
  1836. return -EINVAL;
  1837. data = cm_copy_private_data(private_data, private_data_len);
  1838. if (IS_ERR(data))
  1839. return PTR_ERR(data);
  1840. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1841. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1842. switch(cm_id_priv->id.state) {
  1843. case IB_CM_REQ_RCVD:
  1844. ret = cm_alloc_msg(cm_id_priv, &msg);
  1845. if (ret)
  1846. goto error1;
  1847. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1848. CM_MSG_RESPONSE_REQ, service_timeout,
  1849. private_data, private_data_len);
  1850. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1851. &msg->send_wr, &bad_send_wr);
  1852. if (ret)
  1853. goto error2;
  1854. cm_id->state = IB_CM_MRA_REQ_SENT;
  1855. break;
  1856. case IB_CM_REP_RCVD:
  1857. ret = cm_alloc_msg(cm_id_priv, &msg);
  1858. if (ret)
  1859. goto error1;
  1860. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1861. CM_MSG_RESPONSE_REP, service_timeout,
  1862. private_data, private_data_len);
  1863. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1864. &msg->send_wr, &bad_send_wr);
  1865. if (ret)
  1866. goto error2;
  1867. cm_id->state = IB_CM_MRA_REP_SENT;
  1868. break;
  1869. case IB_CM_ESTABLISHED:
  1870. ret = cm_alloc_msg(cm_id_priv, &msg);
  1871. if (ret)
  1872. goto error1;
  1873. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1874. CM_MSG_RESPONSE_OTHER, service_timeout,
  1875. private_data, private_data_len);
  1876. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  1877. &msg->send_wr, &bad_send_wr);
  1878. if (ret)
  1879. goto error2;
  1880. cm_id->lap_state = IB_CM_MRA_LAP_SENT;
  1881. break;
  1882. default:
  1883. ret = -EINVAL;
  1884. goto error1;
  1885. }
  1886. cm_id_priv->service_timeout = service_timeout;
  1887. cm_set_private_data(cm_id_priv, data, private_data_len);
  1888. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1889. return 0;
  1890. error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1891. kfree(data);
  1892. return ret;
  1893. error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1894. kfree(data);
  1895. cm_free_msg(msg);
  1896. return ret;
  1897. }
  1898. EXPORT_SYMBOL(ib_send_cm_mra);
  1899. static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
  1900. {
  1901. switch (cm_mra_get_msg_mraed(mra_msg)) {
  1902. case CM_MSG_RESPONSE_REQ:
  1903. return cm_acquire_id(mra_msg->remote_comm_id, 0);
  1904. case CM_MSG_RESPONSE_REP:
  1905. case CM_MSG_RESPONSE_OTHER:
  1906. return cm_acquire_id(mra_msg->remote_comm_id,
  1907. mra_msg->local_comm_id);
  1908. default:
  1909. return NULL;
  1910. }
  1911. }
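/*
 * Handle a received MRA: stretch the retry timeout of the outstanding
 * REQ, REP or LAP by the peer's service timeout plus the packet life
 * time, and move to the matching MRA_*_RCVD state.
 */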
  1912. static int cm_mra_handler(struct cm_work *work)
  1913. {
  1914. struct cm_id_private *cm_id_priv;
  1915. struct cm_mra_msg *mra_msg;
  1916. unsigned long flags;
  1917. int timeout, ret;
  1918. mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
  1919. cm_id_priv = cm_acquire_mraed_id(mra_msg);
  1920. if (!cm_id_priv)
  1921. return -EINVAL;
  1922. work->cm_event.private_data = &mra_msg->private_data;
  1923. work->cm_event.param.mra_rcvd.service_timeout =
  1924. cm_mra_get_service_timeout(mra_msg);
  1925. timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
  1926. cm_convert_to_ms(cm_id_priv->av.packet_life_time);
  1927. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1928. switch (cm_id_priv->id.state) {
  1929. case IB_CM_REQ_SENT:
  1930. if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
  1931. ib_modify_mad(cm_id_priv->av.port->mad_agent,
  1932. (unsigned long) cm_id_priv->msg, timeout))
  1933. goto out;
  1934. cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
  1935. break;
  1936. case IB_CM_REP_SENT:
  1937. if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
  1938. ib_modify_mad(cm_id_priv->av.port->mad_agent,
  1939. (unsigned long) cm_id_priv->msg, timeout))
  1940. goto out;
  1941. cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
  1942. break;
  1943. case IB_CM_ESTABLISHED:
  1944. if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
  1945. cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
  1946. ib_modify_mad(cm_id_priv->av.port->mad_agent,
  1947. (unsigned long) cm_id_priv->msg, timeout))
  1948. goto out;
  1949. cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
  1950. break;
  1951. default:
  1952. goto out;
  1953. }
  1954. cm_id_priv->msg->context[1] = (void *) (unsigned long)
  1955. cm_id_priv->id.state;
  1956. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  1957. if (!ret)
  1958. list_add_tail(&work->list, &cm_id_priv->work_list);
  1959. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1960. if (ret)
  1961. cm_process_work(cm_id_priv, work);
  1962. else
  1963. cm_deref_id(cm_id_priv);
  1964. return 0;
  1965. out:
  1966. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1967. cm_deref_id(cm_id_priv);
  1968. return -EINVAL;
  1969. }
  1970. static void cm_format_lap(struct cm_lap_msg *lap_msg,
  1971. struct cm_id_private *cm_id_priv,
  1972. struct ib_sa_path_rec *alternate_path,
  1973. const void *private_data,
  1974. u8 private_data_len)
  1975. {
  1976. cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
  1977. cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
  1978. lap_msg->local_comm_id = cm_id_priv->id.local_id;
  1979. lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1980. cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
  1981. /* todo: need remote CM response timeout */
  1982. cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
  1983. lap_msg->alt_local_lid = alternate_path->slid;
  1984. lap_msg->alt_remote_lid = alternate_path->dlid;
  1985. lap_msg->alt_local_gid = alternate_path->sgid;
  1986. lap_msg->alt_remote_gid = alternate_path->dgid;
  1987. cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
  1988. cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
  1989. lap_msg->alt_hop_limit = alternate_path->hop_limit;
  1990. cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
  1991. cm_lap_set_sl(lap_msg, alternate_path->sl);
  1992. cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
  1993. cm_lap_set_local_ack_timeout(lap_msg,
  1994. min(31, alternate_path->packet_life_time + 1));
  1995. if (private_data && private_data_len)
  1996. memcpy(lap_msg->private_data, private_data, private_data_len);
  1997. }
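/*
 * ib_send_cm_lap() - propose an alternate path.  Only valid on an
 * established connection with no LAP outstanding; on success lap_state
 * becomes IB_CM_LAP_SENT until the APR (or a timeout) arrives.
 */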
  1998. int ib_send_cm_lap(struct ib_cm_id *cm_id,
  1999. struct ib_sa_path_rec *alternate_path,
  2000. const void *private_data,
  2001. u8 private_data_len)
  2002. {
  2003. struct cm_id_private *cm_id_priv;
  2004. struct ib_mad_send_buf *msg;
  2005. struct ib_send_wr *bad_send_wr;
  2006. unsigned long flags;
  2007. int ret;
  2008. if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
  2009. return -EINVAL;
  2010. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2011. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2012. if (cm_id->state != IB_CM_ESTABLISHED ||
  2013. cm_id->lap_state != IB_CM_LAP_IDLE) {
  2014. ret = -EINVAL;
  2015. goto out;
  2016. }
  2017. ret = cm_alloc_msg(cm_id_priv, &msg);
  2018. if (ret)
  2019. goto out;
  2020. cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
  2021. alternate_path, private_data, private_data_len);
  2022. msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
  2023. msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
  2024. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  2025. &msg->send_wr, &bad_send_wr);
  2026. if (ret) {
  2027. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2028. cm_free_msg(msg);
  2029. return ret;
  2030. }
  2031. cm_id->lap_state = IB_CM_LAP_SENT;
  2032. cm_id_priv->msg = msg;
  2033. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2034. return ret;
  2035. }
  2036. EXPORT_SYMBOL(ib_send_cm_lap);
  2037. static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
  2038. struct cm_lap_msg *lap_msg)
  2039. {
  2040. memset(path, 0, sizeof *path);
  2041. path->dgid = lap_msg->alt_local_gid;
  2042. path->sgid = lap_msg->alt_remote_gid;
  2043. path->dlid = lap_msg->alt_local_lid;
  2044. path->slid = lap_msg->alt_remote_lid;
  2045. path->flow_label = cm_lap_get_flow_label(lap_msg);
  2046. path->hop_limit = lap_msg->alt_hop_limit;
  2047. path->traffic_class = cm_lap_get_traffic_class(lap_msg);
  2048. path->reversible = 1;
  2049. /* pkey is same as in REQ */
  2050. path->sl = cm_lap_get_sl(lap_msg);
  2051. path->mtu_selector = IB_SA_EQ;
  2052. /* mtu is same as in REQ */
  2053. path->rate_selector = IB_SA_EQ;
  2054. path->rate = cm_lap_get_packet_rate(lap_msg);
  2055. path->packet_life_time_selector = IB_SA_EQ;
  2056. path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
  2057. path->packet_life_time -= (path->packet_life_time > 0);
  2058. }
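/*
 * Handle a received LAP: rebuild the proposed alternate path from the
 * message (local/remote fields are swapped relative to the sender),
 * re-send the MRA if one was already sent for this LAP, otherwise move
 * lap_state to IB_CM_LAP_RCVD and deliver IB_CM_LAP_RECEIVED.
 */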
  2059. static int cm_lap_handler(struct cm_work *work)
  2060. {
  2061. struct cm_id_private *cm_id_priv;
  2062. struct cm_lap_msg *lap_msg;
  2063. struct ib_cm_lap_event_param *param;
  2064. struct ib_mad_send_buf *msg = NULL;
  2065. struct ib_send_wr *bad_send_wr;
  2066. unsigned long flags;
  2067. int ret;
  2068. /* todo: verify LAP request and send reject APR if invalid. */
  2069. lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
  2070. cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
  2071. lap_msg->local_comm_id);
  2072. if (!cm_id_priv)
  2073. return -EINVAL;
  2074. param = &work->cm_event.param.lap_rcvd;
  2075. param->alternate_path = &work->path[0];
  2076. cm_format_path_from_lap(param->alternate_path, lap_msg);
  2077. work->cm_event.private_data = &lap_msg->private_data;
  2078. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2079. if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
  2080. goto unlock;
  2081. switch (cm_id_priv->id.lap_state) {
  2082. case IB_CM_LAP_IDLE:
  2083. break;
  2084. case IB_CM_MRA_LAP_SENT:
  2085. if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
  2086. goto unlock;
  2087. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  2088. CM_MSG_RESPONSE_OTHER,
  2089. cm_id_priv->service_timeout,
  2090. cm_id_priv->private_data,
  2091. cm_id_priv->private_data_len);
  2092. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2093. if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  2094. &msg->send_wr, &bad_send_wr))
  2095. cm_free_msg(msg);
  2096. goto deref;
  2097. default:
  2098. goto unlock;
  2099. }
  2100. cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
  2101. cm_id_priv->tid = lap_msg->hdr.tid;
  2102. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  2103. if (!ret)
  2104. list_add_tail(&work->list, &cm_id_priv->work_list);
  2105. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2106. if (ret)
  2107. cm_process_work(cm_id_priv, work);
  2108. else
  2109. cm_deref_id(cm_id_priv);
  2110. return 0;
  2111. unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2112. deref: cm_deref_id(cm_id_priv);
  2113. return -EINVAL;
  2114. }
  2115. static void cm_format_apr(struct cm_apr_msg *apr_msg,
  2116. struct cm_id_private *cm_id_priv,
  2117. enum ib_cm_apr_status status,
  2118. void *info,
  2119. u8 info_length,
  2120. const void *private_data,
  2121. u8 private_data_len)
  2122. {
  2123. cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
  2124. apr_msg->local_comm_id = cm_id_priv->id.local_id;
  2125. apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
  2126. apr_msg->ap_status = (u8) status;
  2127. if (info && info_length) {
  2128. apr_msg->info_length = info_length;
  2129. memcpy(apr_msg->info, info, info_length);
  2130. }
  2131. if (private_data && private_data_len)
  2132. memcpy(apr_msg->private_data, private_data, private_data_len);
  2133. }
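/*
 * ib_send_cm_apr() - answer a LAP.  Only valid while established with a
 * LAP received (or MRA'd); once the APR is posted, lap_state returns to
 * IB_CM_LAP_IDLE.
 */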
  2134. int ib_send_cm_apr(struct ib_cm_id *cm_id,
  2135. enum ib_cm_apr_status status,
  2136. void *info,
  2137. u8 info_length,
  2138. const void *private_data,
  2139. u8 private_data_len)
  2140. {
  2141. struct cm_id_private *cm_id_priv;
  2142. struct ib_mad_send_buf *msg;
  2143. struct ib_send_wr *bad_send_wr;
  2144. unsigned long flags;
  2145. int ret;
  2146. if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
  2147. (info && info_length > IB_CM_APR_INFO_LENGTH))
  2148. return -EINVAL;
  2149. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2150. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2151. if (cm_id->state != IB_CM_ESTABLISHED ||
  2152. (cm_id->lap_state != IB_CM_LAP_RCVD &&
  2153. cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
  2154. ret = -EINVAL;
  2155. goto out;
  2156. }
  2157. ret = cm_alloc_msg(cm_id_priv, &msg);
  2158. if (ret)
  2159. goto out;
  2160. cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
  2161. info, info_length, private_data, private_data_len);
  2162. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  2163. &msg->send_wr, &bad_send_wr);
  2164. if (ret) {
  2165. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2166. cm_free_msg(msg);
  2167. return ret;
  2168. }
  2169. cm_id->lap_state = IB_CM_LAP_IDLE;
  2170. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2171. return ret;
  2172. }
  2173. EXPORT_SYMBOL(ib_send_cm_apr);
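/*
 * Handle a received APR: only meaningful while established with a LAP
 * outstanding; cancel the retried LAP, return lap_state to idle and
 * deliver IB_CM_APR_RECEIVED.
 */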
  2174. static int cm_apr_handler(struct cm_work *work)
  2175. {
  2176. struct cm_id_private *cm_id_priv;
  2177. struct cm_apr_msg *apr_msg;
  2178. unsigned long flags;
  2179. int ret;
  2180. apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
  2181. cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
  2182. apr_msg->local_comm_id);
  2183. if (!cm_id_priv)
  2184. return -EINVAL; /* Unmatched reply. */
  2185. work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
  2186. work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
  2187. work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
  2188. work->cm_event.private_data = &apr_msg->private_data;
  2189. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2190. if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
  2191. (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
  2192. cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
  2193. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2194. goto out;
  2195. }
  2196. cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
  2197. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  2198. (unsigned long) cm_id_priv->msg);
  2199. cm_id_priv->msg = NULL;
  2200. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  2201. if (!ret)
  2202. list_add_tail(&work->list, &cm_id_priv->work_list);
  2203. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2204. if (ret)
  2205. cm_process_work(cm_id_priv, work);
  2206. else
  2207. cm_deref_id(cm_id_priv);
  2208. return 0;
  2209. out:
  2210. cm_deref_id(cm_id_priv);
  2211. return -EINVAL;
  2212. }
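/*
 * The timewait timer fired: drop the timewait bookkeeping and, if the
 * id is still in IB_CM_TIMEWAIT for this connection, return it to
 * IB_CM_IDLE and deliver IB_CM_TIMEWAIT_EXIT.
 */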
  2213. static int cm_timewait_handler(struct cm_work *work)
  2214. {
  2215. struct cm_timewait_info *timewait_info;
  2216. struct cm_id_private *cm_id_priv;
  2217. unsigned long flags;
  2218. int ret;
  2219. timewait_info = (struct cm_timewait_info *)work;
  2220. cm_cleanup_timewait(timewait_info);
  2221. cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
  2222. timewait_info->work.remote_id);
  2223. if (!cm_id_priv)
  2224. return -EINVAL;
  2225. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2226. if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
  2227. cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
  2228. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2229. goto out;
  2230. }
  2231. cm_id_priv->id.state = IB_CM_IDLE;
  2232. ret = atomic_inc_and_test(&cm_id_priv->work_count);
  2233. if (!ret)
  2234. list_add_tail(&work->list, &cm_id_priv->work_list);
  2235. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2236. if (ret)
  2237. cm_process_work(cm_id_priv, work);
  2238. else
  2239. cm_deref_id(cm_id_priv);
  2240. return 0;
  2241. out:
  2242. cm_deref_id(cm_id_priv);
  2243. return -EINVAL;
  2244. }
  2245. static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
  2246. struct cm_id_private *cm_id_priv,
  2247. struct ib_cm_sidr_req_param *param)
  2248. {
  2249. cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
  2250. cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
  2251. sidr_req_msg->request_id = cm_id_priv->id.local_id;
  2252. sidr_req_msg->pkey = cpu_to_be16(param->pkey);
  2253. sidr_req_msg->service_id = param->service_id;
  2254. if (param->private_data && param->private_data_len)
  2255. memcpy(sidr_req_msg->private_data, param->private_data,
  2256. param->private_data_len);
  2257. }
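/*
 * ib_send_cm_sidr_req() - service ID resolution request.  Resolves the
 * supplied path into an address vector, then posts the SIDR REQ and
 * moves the id from IB_CM_IDLE to IB_CM_SIDR_REQ_SENT.
 */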
  2258. int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
  2259. struct ib_cm_sidr_req_param *param)
  2260. {
  2261. struct cm_id_private *cm_id_priv;
  2262. struct ib_mad_send_buf *msg;
  2263. struct ib_send_wr *bad_send_wr;
  2264. unsigned long flags;
  2265. int ret;
  2266. if (!param->path || (param->private_data &&
  2267. param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
  2268. return -EINVAL;
  2269. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2270. ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
  2271. if (ret)
  2272. goto out;
  2273. cm_id->service_id = param->service_id;
  2274. cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
  2275. cm_id_priv->timeout_ms = param->timeout_ms;
  2276. cm_id_priv->max_cm_retries = param->max_cm_retries;
  2277. ret = cm_alloc_msg(cm_id_priv, &msg);
  2278. if (ret)
  2279. goto out;
  2280. cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
  2281. param);
  2282. msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
  2283. msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
  2284. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2285. if (cm_id->state == IB_CM_IDLE)
  2286. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  2287. &msg->send_wr, &bad_send_wr);
  2288. else
  2289. ret = -EINVAL;
  2290. if (ret) {
  2291. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2292. cm_free_msg(msg);
  2293. goto out;
  2294. }
  2295. cm_id->state = IB_CM_SIDR_REQ_SENT;
  2296. cm_id_priv->msg = msg;
  2297. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2298. out:
  2299. return ret;
  2300. }
  2301. EXPORT_SYMBOL(ib_send_cm_sidr_req);
  2302. static void cm_format_sidr_req_event(struct cm_work *work,
  2303. struct ib_cm_id *listen_id)
  2304. {
  2305. struct cm_sidr_req_msg *sidr_req_msg;
  2306. struct ib_cm_sidr_req_event_param *param;
  2307. sidr_req_msg = (struct cm_sidr_req_msg *)
  2308. work->mad_recv_wc->recv_buf.mad;
  2309. param = &work->cm_event.param.sidr_req_rcvd;
  2310. param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
  2311. param->listen_id = listen_id;
  2312. param->port = work->port->port_num;
  2313. work->cm_event.private_data = &sidr_req_msg->private_data;
  2314. }
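/*
 * Handle a received SIDR REQ: create a new cm_id, key it by the
 * sender's LID and request id to filter duplicates, look up the
 * listener for the service id and deliver IB_CM_SIDR_REQ_RECEIVED;
 * duplicates and unmatched requests are dropped and the new id
 * destroyed.
 */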
  2315. static int cm_sidr_req_handler(struct cm_work *work)
  2316. {
  2317. struct ib_cm_id *cm_id;
  2318. struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
  2319. struct cm_sidr_req_msg *sidr_req_msg;
  2320. struct ib_wc *wc;
  2321. unsigned long flags;
  2322. cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
  2323. if (IS_ERR(cm_id))
  2324. return PTR_ERR(cm_id);
  2325. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2326. /* Record SGID/SLID and request ID for lookup. */
  2327. sidr_req_msg = (struct cm_sidr_req_msg *)
  2328. work->mad_recv_wc->recv_buf.mad;
  2329. wc = work->mad_recv_wc->wc;
  2330. cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
  2331. cm_id_priv->av.dgid.global.interface_id = 0;
  2332. cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
  2333. &cm_id_priv->av);
  2334. cm_id_priv->id.remote_id = sidr_req_msg->request_id;
  2335. cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
  2336. cm_id_priv->tid = sidr_req_msg->hdr.tid;
  2337. atomic_inc(&cm_id_priv->work_count);
  2338. spin_lock_irqsave(&cm.lock, flags);
  2339. cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
  2340. if (cur_cm_id_priv) {
  2341. spin_unlock_irqrestore(&cm.lock, flags);
  2342. goto out; /* Duplicate message. */
  2343. }
  2344. cur_cm_id_priv = cm_find_listen(cm_id->device,
  2345. sidr_req_msg->service_id);
  2346. if (!cur_cm_id_priv) {
  2347. rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
  2348. spin_unlock_irqrestore(&cm.lock, flags);
  2349. /* todo: reply with no match */
  2350. goto out; /* No match. */
  2351. }
  2352. atomic_inc(&cur_cm_id_priv->refcount);
  2353. spin_unlock_irqrestore(&cm.lock, flags);
  2354. cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
  2355. cm_id_priv->id.context = cur_cm_id_priv->id.context;
  2356. cm_id_priv->id.service_id = sidr_req_msg->service_id;
  2357. cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
  2358. cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
  2359. cm_process_work(cm_id_priv, work);
  2360. cm_deref_id(cur_cm_id_priv);
  2361. return 0;
  2362. out:
  2363. ib_destroy_cm_id(&cm_id_priv->id);
  2364. return -EINVAL;
  2365. }
  2366. static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
  2367. struct cm_id_private *cm_id_priv,
  2368. struct ib_cm_sidr_rep_param *param)
  2369. {
  2370. cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
  2371. cm_id_priv->tid);
  2372. sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
  2373. sidr_rep_msg->status = param->status;
  2374. cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
  2375. sidr_rep_msg->service_id = cm_id_priv->id.service_id;
  2376. sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
  2377. if (param->info && param->info_length)
  2378. memcpy(sidr_rep_msg->info, param->info, param->info_length);
  2379. if (param->private_data && param->private_data_len)
  2380. memcpy(sidr_rep_msg->private_data, param->private_data,
  2381. param->private_data_len);
  2382. }
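/*
 * ib_send_cm_sidr_rep() - answer a SIDR REQ.  Only valid in
 * IB_CM_SIDR_REQ_RCVD; posting the reply returns the id to IB_CM_IDLE
 * and removes it from the remote SIDR table.
 */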
  2383. int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
  2384. struct ib_cm_sidr_rep_param *param)
  2385. {
  2386. struct cm_id_private *cm_id_priv;
  2387. struct ib_mad_send_buf *msg;
  2388. struct ib_send_wr *bad_send_wr;
  2389. unsigned long flags;
  2390. int ret;
  2391. if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
  2392. (param->private_data &&
  2393. param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
  2394. return -EINVAL;
  2395. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2396. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2397. if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
  2398. ret = -EINVAL;
  2399. goto error;
  2400. }
  2401. ret = cm_alloc_msg(cm_id_priv, &msg);
  2402. if (ret)
  2403. goto error;
  2404. cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
  2405. param);
  2406. ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
  2407. &msg->send_wr, &bad_send_wr);
  2408. if (ret) {
  2409. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2410. cm_free_msg(msg);
  2411. return ret;
  2412. }
  2413. cm_id->state = IB_CM_IDLE;
  2414. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2415. spin_lock_irqsave(&cm.lock, flags);
  2416. rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
  2417. spin_unlock_irqrestore(&cm.lock, flags);
  2418. return 0;
  2419. error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2420. return ret;
  2421. }
  2422. EXPORT_SYMBOL(ib_send_cm_sidr_rep);
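/*
 * Handle a received SIDR REP: cancel the retried SIDR REQ, return the
 * id to IB_CM_IDLE and deliver IB_CM_SIDR_REP_RECEIVED.
 */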
  2423. static void cm_format_sidr_rep_event(struct cm_work *work)
  2424. {
  2425. struct cm_sidr_rep_msg *sidr_rep_msg;
  2426. struct ib_cm_sidr_rep_event_param *param;
  2427. sidr_rep_msg = (struct cm_sidr_rep_msg *)
  2428. work->mad_recv_wc->recv_buf.mad;
  2429. param = &work->cm_event.param.sidr_rep_rcvd;
  2430. param->status = sidr_rep_msg->status;
  2431. param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
  2432. param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
  2433. param->info = &sidr_rep_msg->info;
  2434. param->info_len = sidr_rep_msg->info_length;
  2435. work->cm_event.private_data = &sidr_rep_msg->private_data;
  2436. }
  2437. static int cm_sidr_rep_handler(struct cm_work *work)
  2438. {
  2439. struct cm_sidr_rep_msg *sidr_rep_msg;
  2440. struct cm_id_private *cm_id_priv;
  2441. unsigned long flags;
  2442. sidr_rep_msg = (struct cm_sidr_rep_msg *)
  2443. work->mad_recv_wc->recv_buf.mad;
  2444. cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
  2445. if (!cm_id_priv)
  2446. return -EINVAL; /* Unmatched reply. */
  2447. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2448. if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
  2449. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2450. goto out;
  2451. }
  2452. cm_id_priv->id.state = IB_CM_IDLE;
  2453. ib_cancel_mad(cm_id_priv->av.port->mad_agent,
  2454. (unsigned long) cm_id_priv->msg);
  2455. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2456. cm_format_sidr_rep_event(work);
  2457. cm_process_work(cm_id_priv, work);
  2458. return 0;
  2459. out:
  2460. cm_deref_id(cm_id_priv);
  2461. return -EINVAL;
  2462. }
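/*
 * A CM MAD send completed with an error status.  If it is still the
 * active message for the id and the state has not moved on, roll the
 * state back (idle or timewait as appropriate) and report the matching
 * *_ERROR event; otherwise just discard the message.
 */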
  2463. static void cm_process_send_error(struct ib_mad_send_buf *msg,
  2464. enum ib_wc_status wc_status)
  2465. {
  2466. struct cm_id_private *cm_id_priv;
  2467. struct ib_cm_event cm_event;
  2468. enum ib_cm_state state;
  2469. unsigned long flags;
  2470. int ret;
  2471. memset(&cm_event, 0, sizeof cm_event);
  2472. cm_id_priv = msg->context[0];
  2473. /* Discard old sends or ones without a response. */
  2474. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2475. state = (enum ib_cm_state) (unsigned long) msg->context[1];
  2476. if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
  2477. goto discard;
  2478. switch (state) {
  2479. case IB_CM_REQ_SENT:
  2480. case IB_CM_MRA_REQ_RCVD:
  2481. cm_reset_to_idle(cm_id_priv);
  2482. cm_event.event = IB_CM_REQ_ERROR;
  2483. break;
  2484. case IB_CM_REP_SENT:
  2485. case IB_CM_MRA_REP_RCVD:
  2486. cm_reset_to_idle(cm_id_priv);
  2487. cm_event.event = IB_CM_REP_ERROR;
  2488. break;
  2489. case IB_CM_DREQ_SENT:
  2490. cm_enter_timewait(cm_id_priv);
  2491. cm_event.event = IB_CM_DREQ_ERROR;
  2492. break;
  2493. case IB_CM_SIDR_REQ_SENT:
  2494. cm_id_priv->id.state = IB_CM_IDLE;
  2495. cm_event.event = IB_CM_SIDR_REQ_ERROR;
  2496. break;
  2497. default:
  2498. goto discard;
  2499. }
  2500. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2501. cm_event.param.send_status = wc_status;
  2502. /* No other events can occur on the cm_id at this point. */
  2503. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
  2504. cm_free_msg(msg);
  2505. if (ret)
  2506. ib_destroy_cm_id(&cm_id_priv->id);
  2507. return;
  2508. discard:
  2509. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2510. cm_free_msg(msg);
  2511. }
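/*
 * Send completion handler: successful or flushed MADs are simply
 * freed; real errors on state-changing messages are routed to
 * cm_process_send_error().
 */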
  2512. static void cm_send_handler(struct ib_mad_agent *mad_agent,
  2513. struct ib_mad_send_wc *mad_send_wc)
  2514. {
  2515. struct ib_mad_send_buf *msg;
  2516. msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
  2517. switch (mad_send_wc->status) {
  2518. case IB_WC_SUCCESS:
  2519. case IB_WC_WR_FLUSH_ERR:
  2520. cm_free_msg(msg);
  2521. break;
  2522. default:
  2523. if (msg->context[0] && msg->context[1])
  2524. cm_process_send_error(msg, mad_send_wc->status);
  2525. else
  2526. cm_free_msg(msg);
  2527. break;
  2528. }
  2529. }
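/*
 * Workqueue entry point: dispatch a queued cm_work to the handler for
 * its event; if the handler fails, the work is freed here.
 */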
  2530. static void cm_work_handler(void *data)
  2531. {
  2532. struct cm_work *work = data;
  2533. int ret;
  2534. switch (work->cm_event.event) {
  2535. case IB_CM_REQ_RECEIVED:
  2536. ret = cm_req_handler(work);
  2537. break;
  2538. case IB_CM_MRA_RECEIVED:
  2539. ret = cm_mra_handler(work);
  2540. break;
  2541. case IB_CM_REJ_RECEIVED:
  2542. ret = cm_rej_handler(work);
  2543. break;
  2544. case IB_CM_REP_RECEIVED:
  2545. ret = cm_rep_handler(work);
  2546. break;
  2547. case IB_CM_RTU_RECEIVED:
  2548. ret = cm_rtu_handler(work);
  2549. break;
  2550. case IB_CM_USER_ESTABLISHED:
  2551. ret = cm_establish_handler(work);
  2552. break;
  2553. case IB_CM_DREQ_RECEIVED:
  2554. ret = cm_dreq_handler(work);
  2555. break;
  2556. case IB_CM_DREP_RECEIVED:
  2557. ret = cm_drep_handler(work);
  2558. break;
  2559. case IB_CM_SIDR_REQ_RECEIVED:
  2560. ret = cm_sidr_req_handler(work);
  2561. break;
  2562. case IB_CM_SIDR_REP_RECEIVED:
  2563. ret = cm_sidr_rep_handler(work);
  2564. break;
  2565. case IB_CM_LAP_RECEIVED:
  2566. ret = cm_lap_handler(work);
  2567. break;
  2568. case IB_CM_APR_RECEIVED:
  2569. ret = cm_apr_handler(work);
  2570. break;
  2571. case IB_CM_TIMEWAIT_EXIT:
  2572. ret = cm_timewait_handler(work);
  2573. break;
  2574. default:
  2575. ret = -EINVAL;
  2576. break;
  2577. }
  2578. if (ret)
  2579. cm_free_work(work);
  2580. }
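/*
 * ib_cm_establish() - consumer notification that the passive side is in
 * use (e.g. the first message arrived before the RTU).  Moves
 * IB_CM_REP_SENT / IB_CM_MRA_REP_RCVD to IB_CM_ESTABLISHED and queues an
 * IB_CM_USER_ESTABLISHED work item; the cm_id is re-looked-up from the
 * work handler to avoid racing with destruction.
 */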
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
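
/*
 * MAD receive callback.  Map the CM attribute ID to a CM event, allocate
 * a work item with room for any path records carried by a REQ or LAP,
 * and hand it to the CM workqueue.  Unrecognized attributes free the MAD.
 */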
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
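
/*
 * Fill in the attributes needed to move a QP to the INIT state for this
 * connection: access flags (remote access only if responder resources
 * were negotiated), P_Key index and port number.
 */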
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
						    IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
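
/*
 * Fill in the attributes needed to move a QP to RTR: the primary (and,
 * if loaded, alternate) address vector, path MTU, destination QPN,
 * RQ PSN and responder resources.
 */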
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN |
				IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
		qp_attr->min_rnr_timer = 0;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
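
/*
 * Fill in the attributes needed to move a QP to RTS: ack timeout, retry
 * and RNR retry counts, SQ PSN and initiator depth.  If an alternate
 * path is loaded, also rearm the QP for path migration.
 */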
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
				IB_QP_MAX_QP_RD_ATOMIC;
		qp_attr->timeout = cm_id_priv->local_ack_timeout;
		qp_attr->retry_cnt = cm_id_priv->retry_count;
		qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
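
/*
 * Consumer entry point: given the desired QP state in qp_attr->qp_state,
 * return the connection-specific QP attributes and mask for the INIT,
 * RTR or RTS transition.
 */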
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
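
/* Query the device for its node GUID; returns 0 on any failure. */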
static __be64 cm_get_ca_guid(struct ib_device *device)
{
	struct ib_device_attr *device_attr;
	__be64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr)
		return 0;

	ret = ib_query_device(device, device_attr);
	guid = ret ? 0 : device_attr->node_guid;
	kfree(device_attr);
	return guid;
}
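
/*
 * Device-add callback for the cm client: register a GSI MAD agent on
 * every physical port, advertise CM support in each port's capability
 * mask, and add the device to the global CM device list.  On error,
 * unwind any ports already set up.
 */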
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
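
/*
 * Device-removal callback: take the device off the CM device list, then
 * clear the CM capability bit and unregister the MAD agent on each port.
 */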
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
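
/*
 * Module init: reset the global cm state, set up the lookup tables and
 * local ID allocator, create the CM workqueue and register as an IB
 * client so cm_add_one()/cm_remove_one() run for each device.
 */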
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
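
/*
 * Module exit: drain and destroy the CM workqueue, then unregister the
 * IB client.
 */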
static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);