/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
} cm;

struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};

struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;
        struct ib_cm_compare_data *compare_data;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(void *data);
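
/*
 * MADs in flight and queued work items each hold a reference on the
 * cm_id_private.  cm_destroy_id() waits on 'comp' until the final
 * reference is dropped here before freeing the structure.
 */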
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}
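
/*
 * Allocate a MAD send buffer addressed via the cm_id's primary address
 * vector.  The buffer pins the cm_id with an extra reference, recorded
 * in context[0] and released by cm_free_msg().
 */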
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
                           grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->device, port->port_num, path,
                             &av->ah_attr);
        av->packet_life_time = path->packet_life_time;
        return 0;
}
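
/*
 * Local communication IDs come from an idr and are xor'ed with
 * cm.random_id_operand, so the IDs that appear on the wire are not
 * trivially guessable.  cm_free_id() and cm_get_id() undo the xor to
 * recover the idr key.
 */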
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret, id;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
                                        next_id++, &id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while ((ret == -EAGAIN) &&
                 idr_pre_get(&cm.local_id_table, GFP_KERNEL));

        cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
        return ret;
}

static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
        int i;

        for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
                ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                                             ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];
        u8 dst[IB_CM_COMPARE_SIZE];

        if (!src_data || !dst_data)
                return 0;

        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
        return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
                                   struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];

        if (!dst_data)
                return 0;

        cm_mask_copy(src, private_data, dst_data->mask);
        return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
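
/*
 * The listen service table is a red-black tree keyed on (device,
 * service_id, compare data), in that order.  Insertion returns the
 * existing entry when an equivalent listen (including overlapping
 * service masks) is already present, so callers can fail with -EBUSY.
 */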
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;
        int data_cmp;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                data_cmp = cm_compare_data(cm_id_priv->compare_data,
                                           cur_cm_id_priv->compare_data);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
                    !data_cmp)
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else if (service_id > cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_right;
                else if (data_cmp < 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
                                             u8 *private_data)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        int data_cmp;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                data_cmp = cm_compare_private_data(private_data,
                                                   cm_id_priv->compare_data);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device) && !data_cmp)
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else if (service_id > cm_id_priv->id.service_id)
                        node = node->rb_right;
                else if (data_cmp < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
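
/*
 * Minimal consumer sketch (illustrative only; the handler name and
 * context below are hypothetical).  Events are dispatched from the CM
 * workqueue; returning non-zero from the handler destroys the cm_id:
 *
 *      static int my_cm_handler(struct ib_cm_id *cm_id,
 *                               struct ib_cm_event *event)
 *      {
 *              switch (event->event) {
 *              case IB_CM_REQ_RECEIVED:
 *                      ...accept or reject the connection...
 *              default:
 *                      return 0;
 *              }
 *      }
 *
 *      cm_id = ib_create_cm_id(device, my_cm_handler, my_context);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 */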

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}
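
/*
 * IBA timeouts are encoded as an exponent: the actual time is
 * 4.096us * 2^iba_time.  Shifting by (iba_time - 8) approximates the
 * value in milliseconds, since 4.096us * 2^8 is roughly 1ms.  For
 * example, iba_time = 14 means 4.096us * 2^14 ~= 67ms, approximated
 * here as 1 << 6 = 64ms.
 */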
static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}
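
/*
 * Tear down a cm_id from any state: cancel outstanding MADs, send the
 * REJ, DREQ, or DREP appropriate to the current connection state, then
 * wait for all references to drop before freeing.  An ESTABLISHED id
 * is disconnected first and re-enters the switch via 'goto retest'.
 */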
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                } else {
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->compare_data);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                 struct ib_cm_compare_data *compare_data)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        if (compare_data) {
                cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                                   GFP_KERNEL);
                if (!cm_id_priv->compare_data)
                        return -ENOMEM;
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
                       IB_CM_COMPARE_SIZE);
        }

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                kfree(cm_id_priv->compare_data);
                cm_id_priv->compare_data = NULL;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
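
/*
 * Listen sketch (illustrative only; the service ID is hypothetical).
 * Passing 0 as service_mask matches the service ID exactly, and
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused ID:
 *
 *      ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234ULL), 0, NULL);
 *      if (ret)
 *              ib_destroy_cm_id(cm_id);
 */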

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
                         (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
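
/*
 * cm_form_tid() above packs the MAD agent's hi_tid into bits 63:32 of
 * the transaction ID and OR's the two-bit message sequence into bits
 * 31:30 of the local communication ID, so responses can be matched
 * back to both the owning agent and the connection.
 */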

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
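
/*
 * REQ sketch (illustrative only; the path record and QP are assumed
 * to have been resolved elsewhere, e.g. via an SA path query):
 *
 *      struct ib_cm_req_param req = {
 *              .primary_path   = &path_rec,
 *              .service_id     = cpu_to_be64(0x1234ULL),
 *              .qp_num         = qp->qp_num,
 *              .qp_type        = IB_QPT_RC,
 *              .retry_count    = 7,
 *              .max_cm_retries = 15,
 *      };
 *      ret = ib_send_cm_req(cm_id, &req);
 *
 * A REP, REJ, or timeout then arrives through the cm_id's handler.
 */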

static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
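
/*
 * Build path records from a received REQ.  Note the deliberate
 * swapping: the REQ describes the path from the sender's point of
 * view, so its local LID/GID become our destination and its remote
 * LID/GID our source.  The ack timeout is decremented by one to undo
 * the +1 applied by the sender in cm_format_req().
 */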
  953. static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
  954. struct ib_sa_path_rec *primary_path,
  955. struct ib_sa_path_rec *alt_path)
  956. {
  957. memset(primary_path, 0, sizeof *primary_path);
  958. primary_path->dgid = req_msg->primary_local_gid;
  959. primary_path->sgid = req_msg->primary_remote_gid;
  960. primary_path->dlid = req_msg->primary_local_lid;
  961. primary_path->slid = req_msg->primary_remote_lid;
  962. primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
  963. primary_path->hop_limit = req_msg->primary_hop_limit;
  964. primary_path->traffic_class = req_msg->primary_traffic_class;
  965. primary_path->reversible = 1;
  966. primary_path->pkey = req_msg->pkey;
  967. primary_path->sl = cm_req_get_primary_sl(req_msg);
  968. primary_path->mtu_selector = IB_SA_EQ;
  969. primary_path->mtu = cm_req_get_path_mtu(req_msg);
  970. primary_path->rate_selector = IB_SA_EQ;
  971. primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
  972. primary_path->packet_life_time_selector = IB_SA_EQ;
  973. primary_path->packet_life_time =
  974. cm_req_get_primary_local_ack_timeout(req_msg);
  975. primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
  976. if (req_msg->alt_local_lid) {
  977. memset(alt_path, 0, sizeof *alt_path);
  978. alt_path->dgid = req_msg->alt_local_gid;
  979. alt_path->sgid = req_msg->alt_remote_gid;
  980. alt_path->dlid = req_msg->alt_local_lid;
  981. alt_path->slid = req_msg->alt_remote_lid;
  982. alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
  983. alt_path->hop_limit = req_msg->alt_hop_limit;
  984. alt_path->traffic_class = req_msg->alt_traffic_class;
  985. alt_path->reversible = 1;
  986. alt_path->pkey = req_msg->pkey;
  987. alt_path->sl = cm_req_get_alt_sl(req_msg);
  988. alt_path->mtu_selector = IB_SA_EQ;
  989. alt_path->mtu = cm_req_get_path_mtu(req_msg);
  990. alt_path->rate_selector = IB_SA_EQ;
  991. alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
  992. alt_path->packet_life_time_selector = IB_SA_EQ;
  993. alt_path->packet_life_time =
  994. cm_req_get_alt_local_ack_timeout(req_msg);
  995. alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
  996. }
  997. }
  998. static void cm_format_req_event(struct cm_work *work,
  999. struct cm_id_private *cm_id_priv,
  1000. struct ib_cm_id *listen_id)
  1001. {
  1002. struct cm_req_msg *req_msg;
  1003. struct ib_cm_req_event_param *param;
  1004. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1005. param = &work->cm_event.param.req_rcvd;
  1006. param->listen_id = listen_id;
  1007. param->port = cm_id_priv->av.port->port_num;
  1008. param->primary_path = &work->path[0];
  1009. if (req_msg->alt_local_lid)
  1010. param->alternate_path = &work->path[1];
  1011. else
  1012. param->alternate_path = NULL;
  1013. param->remote_ca_guid = req_msg->local_ca_guid;
  1014. param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
  1015. param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
  1016. param->qp_type = cm_req_get_qp_type(req_msg);
  1017. param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
  1018. param->responder_resources = cm_req_get_init_depth(req_msg);
  1019. param->initiator_depth = cm_req_get_resp_res(req_msg);
  1020. param->local_cm_response_timeout =
  1021. cm_req_get_remote_resp_timeout(req_msg);
  1022. param->flow_control = cm_req_get_flow_ctrl(req_msg);
  1023. param->remote_cm_response_timeout =
  1024. cm_req_get_local_resp_timeout(req_msg);
  1025. param->retry_count = cm_req_get_retry_count(req_msg);
  1026. param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
  1027. param->srq = cm_req_get_srq(req_msg);
  1028. work->cm_event.private_data = &req_msg->private_data;
  1029. }
  1030. static void cm_process_work(struct cm_id_private *cm_id_priv,
  1031. struct cm_work *work)
  1032. {
  1033. unsigned long flags;
  1034. int ret;
  1035. /* We will typically only have the current event to report. */
  1036. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
  1037. cm_free_work(work);
  1038. while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
  1039. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1040. work = cm_dequeue_work(cm_id_priv);
  1041. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1042. BUG_ON(!work);
  1043. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
  1044. &work->cm_event);
  1045. cm_free_work(work);
  1046. }
  1047. cm_deref_id(cm_id_priv);
  1048. if (ret)
  1049. cm_destroy_id(&cm_id_priv->id, ret);
  1050. }
  1051. static void cm_format_mra(struct cm_mra_msg *mra_msg,
  1052. struct cm_id_private *cm_id_priv,
  1053. enum cm_msg_response msg_mraed, u8 service_timeout,
  1054. const void *private_data, u8 private_data_len)
  1055. {
  1056. cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
  1057. cm_mra_set_msg_mraed(mra_msg, msg_mraed);
  1058. mra_msg->local_comm_id = cm_id_priv->id.local_id;
  1059. mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1060. cm_mra_set_service_timeout(mra_msg, service_timeout);
  1061. if (private_data && private_data_len)
  1062. memcpy(mra_msg->private_data, private_data, private_data_len);
  1063. }
  1064. static void cm_format_rej(struct cm_rej_msg *rej_msg,
  1065. struct cm_id_private *cm_id_priv,
  1066. enum ib_cm_rej_reason reason,
  1067. void *ari,
  1068. u8 ari_length,
  1069. const void *private_data,
  1070. u8 private_data_len)
  1071. {
  1072. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
  1073. rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1074. switch(cm_id_priv->id.state) {
  1075. case IB_CM_REQ_RCVD:
  1076. rej_msg->local_comm_id = 0;
  1077. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1078. break;
  1079. case IB_CM_MRA_REQ_SENT:
  1080. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1081. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1082. break;
  1083. case IB_CM_REP_RCVD:
  1084. case IB_CM_MRA_REP_SENT:
  1085. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1086. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
  1087. break;
  1088. default:
  1089. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1090. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
  1091. break;
  1092. }
  1093. rej_msg->reason = cpu_to_be16(reason);
  1094. if (ari && ari_length) {
  1095. cm_rej_set_reject_info_len(rej_msg, ari_length);
  1096. memcpy(rej_msg->ari, ari, ari_length);
  1097. }
  1098. if (private_data && private_data_len)
  1099. memcpy(rej_msg->private_data, private_data, private_data_len);
  1100. }
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

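/*
 * Match an incoming REQ against the listen and timewait trees.  Returns
 * the listening cm_id_priv with a reference held, or NULL if the REQ is
 * a duplicate, matches a stale connection, or has no listening service.
 */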
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}

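/*
 * Passive-side REQ handling: create a new cm_id, resolve the primary
 * (and optional alternate) path, pull the connection parameters out of
 * the REQ, and deliver the event through the listener's callback.
 */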
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

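/*
 * A duplicate REP means our RTU or MRA was lost or delayed: resend
 * whichever response matches the current connection state.
 */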
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

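/*
 * Active-side REP handling: record the remote connection parameters,
 * using the timewait trees to detect duplicate REPs and stale
 * connections before accepting the reply.
 */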
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

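/* Passive side: the RTU completes the connection establishment. */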
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

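/*
 * Reply to a DREQ that no longer matches a local cm_id so the remote
 * side can finish disconnecting.
 */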
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

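/*
 * DREQ handling depends on how far the connection has progressed: an
 * in-flight REP or DREQ of our own is canceled, and a DREQ seen in
 * timewait gets an immediate DREP since the disconnect has already
 * completed locally.
 */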
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

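/*
 * Map a REJ back to its cm_id.  For timeout rejects the comm_ids may
 * not be usable, so the lookup goes through the timewait info using
 * the remote CA GUID carried in the ARI.
 */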
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

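/*
 * An MRA for a REQ arrives before the remote comm_id is known on our
 * side, so only MRAs for REP and LAP can be matched on both comm_ids.
 */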
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

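/*
 * Build the alternate path record from a received LAP.  Local and
 * remote fields swap because the message describes the path from the
 * sender's point of view.
 */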
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

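/* The timewait period has expired; move the cm_id back to IDLE. */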
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

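/*
 * SIDR REQ handling: create a cm_id for the response, detect duplicate
 * requests via the remote SIDR table, and hand the event to the
 * matching listener.
 */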
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

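/*
 * A send completed in error: reset the connection state and report the
 * failure to the user as the matching *_ERROR event, unless the send
 * has already been superseded.
 */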
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;

discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}


static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		/*
		 * Only sends still waiting for a response stash the cm_id
		 * and its expected state in context[]; those need error
		 * processing, anything else is simply freed.
		 */
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
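
/*
 * Illustrative sketch, not part of cm.c: a ULP that can receive data
 * before the RTU arrives may complete a receive while its cm_id is
 * still in IB_CM_REP_SENT.  Calling ib_cm_establish() then forces the
 * transition to IB_CM_ESTABLISHED.  "struct example_conn" and the
 * helper name are hypothetical ULP-side constructs.
 */
struct example_conn {
	struct ib_cm_id *cm_id;
	struct ib_qp	*qp;
};

static void example_recv_done(struct example_conn *conn)
{
	int ret = ib_cm_establish(conn->cm_id);

	/* -EISCONN just means the RTU got there first; that's fine. */
	if (ret && ret != -EISCONN)
		printk(KERN_WARNING "ib_cm_establish failed: %d\n", ret);
}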

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		/*
		 * A REQ carries a primary path and, optionally, an
		 * alternate path; size the work struct accordingly.
		 */
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
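
/*
 * Illustrative sketch, not part of cm.c: the usual consumer pattern for
 * driving a connected QP through INIT -> RTR -> RTS.  For each target
 * state the ULP lets ib_cm_init_qp_attr() fill in the CM-derived
 * attributes and mask, then applies them with ib_modify_qp().  The
 * helper name is hypothetical; a ULP would typically call it with
 * IB_QPS_INIT after creating the QP, IB_QPS_RTR once the REQ/REP
 * exchange has provided the remote QPN and path, and IB_QPS_RTS before
 * sending on the established connection.
 */
static int example_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp,
			     enum ib_qp_state state)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = state;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}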

static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);
	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);