cm.c

/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
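/*
 * Registering an ib_client gives the CM add/remove callbacks for every
 * IB device in the system; cm_add_one() allocates per-port MAD agents
 * when a device appears.  A minimal registration sketch (module
 * init/exit are outside this excerpt, so these function names are
 * illustrative only):
 *
 *	static int __init my_cm_init(void)
 *	{
 *		return ib_register_client(&cm_client);
 *	}
 *
 *	static void __exit my_cm_exit(void)
 *	{
 *		ib_unregister_client(&cm_client);
 *	}
 */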
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}
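/*
 * Reference counting note: each cm_id_private starts with a refcount of
 * one, held by the creator.  MADs in flight and queued work items take
 * extra references.  ib_destroy_cm_id() drops the creator's reference
 * and then sleeps on the wait queue until cm_deref_id() observes the
 * count reach zero, which is why it is safe to kfree() afterwards.
 */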
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);	/* zero the struct, not the pointer */
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}
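/*
 * The loop above is the two-phase idr idiom of this era:
 * idr_get_new_above() never allocates under the spinlock, so on
 * -EAGAIN the caller preloads with idr_pre_get() (which may sleep)
 * and retries.  A standalone sketch of the same pattern, with
 * illustrative names:
 *
 *	int id, ret;
 *	do {
 *		spin_lock(&my_lock);
 *		ret = idr_get_new(&my_idr, my_ptr, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN && idr_pre_get(&my_idr, GFP_KERNEL));
 */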
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}
	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id))
			return cm_id_priv;
		if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
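/*
 * Overlap check, by example (values illustrative): an existing listen
 * on service ID 0x1000 with mask 0xFF00 overlaps a new exact listen on
 * ID 0x1034 (mask ~0), since 0xFF00 & 0x1034 == 0x1000 and
 * ~0 & 0x1000 == 0x1000.  cm_insert_listen() returns the caller's own
 * cm_id_priv in that case, which ib_cm_listen() below maps to -EBUSY.
 */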
static struct cm_id_private * cm_find_listen(__be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
			return cm_id_priv;
		if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	memset(cm_id_priv, 0, sizeof *cm_id_priv);
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
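/*
 * Typical consumer usage, as a sketch (the handler body and names are
 * illustrative, not part of this file):
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 struct ib_cm_event *event)
 *	{
 *		// Return 0 to keep the cm_id; nonzero asks the CM to
 *		// destroy it (see cm_process_work() below).
 *		return 0;
 *	}
 *
 *	struct ib_cm_id *cm_id = ib_create_cm_id(my_cm_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */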
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
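/*
 * Worked example: for iba_time = 20 the exact IBA value is
 * 4.096us * 2^20 ~= 4295 ms, and the approximation above returns
 * 1 << 12 = 4096 ms.  Dropping 8 from the exponent converts the units,
 * since 2^8 * 4.096us ~= 1.05 ms.
 */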
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);
	memset(timewait_info, 0, sizeof *timewait_info);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
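/*
 * Passive-side sketch (service ID and names illustrative): create an
 * idle cm_id, then bind it to a service.  A mask of 0 defaults to
 * "match the service ID exactly", per the defaulting above.
 *
 *	struct ib_cm_id *cm_id = ib_create_cm_id(my_cm_handler, NULL);
 *	if (!IS_ERR(cm_id)) {
 *		int ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0);
 *		if (ret)
 *			ib_destroy_cm_id(cm_id);
 *	}
 */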
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
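/*
 * TID layout produced above: the MAD agent's hi_tid fills the upper 32
 * bits, and the lower 32 bits are the local communication ID with the
 * two-bit message sequence (enum cm_msg_sequence) OR'd into bits 31:30,
 * so a response TID alone identifies the owning cm_id and message class.
 */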
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method        = IB_MGMT_METHOD_SEND;
	hdr->attr_id       = attr_id;
	hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_send_wr *bad_send_wr;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &cm_id_priv->msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
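/*
 * Active-side sketch (field values illustrative): fill an
 * ib_cm_req_param around a resolved path record and a QP, then send
 * the REQ; the connection completes asynchronously via the cm_handler.
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path = &path_rec,	// from an SA path query
 *		.service_id   = cpu_to_be64(0x1000ULL),
 *		.qp_num       = my_qp->qp_num,
 *		.qp_type      = IB_QPT_RC,
 *		.starting_psn = 0,
 *		.responder_resources        = 4,
 *		.initiator_depth            = 4,
 *		.retry_count                = 7,
 *		.rnr_retry_count            = 7,
 *		.max_cm_retries             = 15,
 *		.remote_cm_response_timeout = 20,
 *		.local_cm_response_timeout  = 20,
 *	};
 *	ret = ib_send_cm_req(cm_id, &req);
 */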
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->device = cm_id_priv->av.port->mad_agent->device;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}
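/*
 * Event dispatch note: work_count starts at -1 (see ib_create_cm_id),
 * so atomic_add_negative(-1, ...) returning false means another event
 * was queued while the handler ran; the loop drains those in order.
 * A nonzero handler return asks the CM to tear the cm_id down, which
 * is why cm_process_work() may call ib_destroy_cm_id() itself.
 */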
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

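/*
 * Send a REP in response to a connection request.  Valid only in the
 * REQ_RCVD or MRA_REQ_SENT states; the posted MAD is retried by the
 * MAD layer until a response (normally the RTU) cancels it.
 */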
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

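/*
 * A REP whose remote comm ID no longer maps to a connecting cm_id is
 * presumed to be a duplicate: re-send the RTU (if we are established)
 * or the previously sent MRA so the active side can make progress.
 */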
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

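/*
 * Handle a received REP on the active side: screen for duplicate REPs
 * and stale connections, record the remote's parameters (note that the
 * peer's initiator depth becomes our responder resources and vice
 * versa), and cancel the outstanding REQ retries.
 */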
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock_irqsave(&cm.lock, flags);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

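/*
 * Initiate a disconnect.  If the DREQ cannot be allocated or posted,
 * the connection is moved into timewait anyway so that teardown still
 * completes.
 */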
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

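/*
 * Handle a received DREQ.  A DREQ that arrives while we are already in
 * timewait is answered directly with a DREP, on the assumption that
 * our earlier DREP was lost.
 */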
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

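/*
 * Reject a connection.  In the REQ exchange states the cm_id resets to
 * idle; once our own REP is outstanding it enters timewait instead,
 * since the remote side may already consider the connection
 * established.
 */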
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

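/*
 * Locate the cm_id targeted by a REJ.  For a timeout rejection the
 * comm IDs in the REJ refer to the stale connection, so the lookup
 * goes through the remote ID tree using the CA GUID carried in the
 * ARI.
 */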
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

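/*
 * Send an MRA to extend the remote CM's timeout for an outstanding
 * REQ, REP, or LAP.  The private data and service timeout are stashed
 * so that duplicates of the MRA'd message can be answered with the
 * same MRA.
 */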
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

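/*
 * Handle a received MRA: verify that it acknowledges the message we
 * have outstanding, then stretch that message's retry timeout by the
 * service timeout the remote side requested.
 */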
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

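/*
 * Handle a received LAP, valid only on an established connection.  If
 * this LAP has already been MRA'd, the stored MRA is re-sent on the
 * assumption that the LAP is a retry.
 */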
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

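/*
 * A timewait period has expired.  If the cm_id is still in timewait
 * for the same remote QPN, move it back to idle and report
 * IB_CM_TIMEWAIT_EXIT to the consumer.
 */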
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

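/*
 * Send a SIDR REQ, which resolves a service ID to a QPN/Q_Key without
 * establishing a full connection.
 */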
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->device = work->port->mad_agent->device;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

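/*
 * A CM MAD completed in error, typically after its retries timed out
 * without a response.  If it is still the active message for its
 * cm_id, convert the failure into the matching *_ERROR event and back
 * the connection state out.
 */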
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg;

	msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we find the cm_id once we're in the context of the worker
	 * thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
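
/*
 * MAD receive handler.  Map the incoming attribute ID to a CM event,
 * allocate a work item with room for any path records the REQ/LAP
 * handlers will extract, and defer all processing to cm.wq.
 */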
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
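
/*
 * Return the QP attributes needed for the RESET->INIT transition:
 * pkey index, port number and access flags.  Remote read/write access
 * is enabled only if the connection granted responder resources.
 */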
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
						    IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
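
/*
 * Return the QP attributes for the INIT->RTR transition: primary path
 * address vector, path MTU, destination QPN and starting receive PSN,
 * plus the alternate path if one has been loaded.
 */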
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN |
				IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
		qp_attr->min_rnr_timer = 0;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
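
/*
 * Return the QP attributes for the RTR->RTS transition: ack timeout,
 * retry counts and starting send PSN.  If an alternate path exists,
 * rearm path migration so the QP can fail over to it.
 */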
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
				IB_QP_MAX_QP_RD_ATOMIC;
		qp_attr->timeout = cm_id_priv->local_ack_timeout;
		qp_attr->retry_cnt = cm_id_priv->retry_count;
		qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
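
/*
 * ib_cm_init_qp_attr - fill in the attribute mask and values that the
 * caller should pass to ib_modify_qp() to move its QP through the
 * INIT, RTR and RTS states for this connection.
 */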
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
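
/* Query the device for its node GUID, returning 0 on any failure. */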
static __be64 cm_get_ca_guid(struct ib_device *device)
{
	struct ib_device_attr *device_attr;
	__be64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr)
		return 0;

	ret = ib_query_device(device, device_attr);
	guid = ret ? 0 : device_attr->node_guid;
	kfree(device_attr);
	return guid;
}
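
/*
 * Device add callback: register a CM MAD agent on the GSI QP of every
 * physical port and advertise CM support in each port's capability
 * mask.  If any port fails, unwind the ports already configured.
 */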
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
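
/*
 * Device removal callback: clear the CM capability bit and unregister
 * the MAD agent on each port, then free the per-device state.
 */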
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
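
/*
 * Module initialization: set up the global CM state, service/ID
 * lookup tables and work queue, then register with the IB core so
 * cm_add_one() is invoked for each existing and future device.
 */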
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
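
/*
 * Module cleanup: flush any outstanding work before tearing down the
 * work queue and unregistering from the IB core.
 */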
static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);