cma.c

/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name = "cma",
	.add = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);
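
/*
 * Per-device state: ids attached to a device are linked on id_list (see
 * cma_attach_to_dev()), and the refcount/completion pair lets teardown wait
 * for them to drop their references (see cma_deref_dev()).
 */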
struct cma_device {
	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	atomic_t refcount;
	struct list_head id_list;
};

struct rdma_bind_list {
	struct idr *ps;
	struct hlist_head owners;
	unsigned short port;
};

enum {
	CMA_OPTION_AFONLY,
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id id;
	struct rdma_bind_list *bind_list;
	struct hlist_node node;
	struct list_head list; /* listen_any_list or cma_device.list */
	struct list_head listen_list; /* per device listens */
	struct cma_device *cma_dev;
	struct list_head mc_list;
	int internal_id;
	enum rdma_cm_state state;
	spinlock_t lock;
	struct mutex qp_mutex;
	struct completion comp;
	atomic_t refcount;
	struct mutex handler_mutex;
	int backlog;
	int timeout_ms;
	struct ib_sa_query *query;
	int query_id;
	union {
		struct ib_cm_id *ib;
		struct iw_cm_id *iw;
	} cm_id;
	u32 seq_num;
	u32 qkey;
	u32 qp_num;
	pid_t owner;
	u32 options;
	u8 srq;
	u8 tos;
	u8 reuseaddr;
	u8 afonly;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head list;
	void *context;
	struct sockaddr_storage addr;
	struct kref mcref;
};

struct cma_work {
	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;
};

struct cma_ndev_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct rdma_cm_event event;
};

struct iboe_mcast_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct cma_multicast *mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version; /* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
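
/*
 * id_priv->state holds the RDMA CM state machine; the helpers below test,
 * compare-and-exchange, or unconditionally exchange that state under
 * id_priv->lock.
 */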
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;
	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;
	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
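
/*
 * Resolve the QKEY for a UD-style id: keep a caller-supplied value, fall back
 * to RDMA_UDP_QKEY for the UDP and IB port spaces, or, for IPoIB, look up the
 * group's qkey via ib_sa_get_mcmember_rec().
 */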
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;
	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}
	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}
	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
{
	int i;
	int err;
	struct ib_port_attr props;
	union ib_gid tmp;
	err = ib_query_port(device, port_num, &props);
	if (err)
		return err;
	for (i = 0; i < props.gid_tbl_len; ++i) {
		err = ib_query_gid(device, port_num, i, &tmp);
		if (err)
			return err;
		if (!memcmp(&tmp, gid, sizeof tmp))
			return 0;
	}
	return -EADDRNOTAVAIL;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;
	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}
	return ret;
}
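
/*
 * Bind the id to a device: pick the first registered device/port whose GID
 * table contains the id's source GID. Ethernet (IBoE) ports are matched
 * against the IBoE-style GID derived from the bound address instead.
 */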
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	int ret = -ENODEV;
	u8 port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	mutex_lock(&lock);
	iboe_addr_get_sgid(dev_addr, &iboe_gid);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
				else
					ret = find_gid_port(cma_dev->device, &gid, port);
				if (!ret) {
					id_priv->id.port_num = port;
					goto out;
				}
			}
		}
	}
out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;
	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;
			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}
	if (!cma_dev)
		return -ENODEV;
found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
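
/*
 * rdma_create_id() allocates an id in the RDMA_CM_IDLE state. A minimal,
 * illustrative call (handler and context names are placeholders, error
 * handling trimmed):
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(my_event_handler, my_context, RDMA_PS_TCP,
 *			    IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 */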
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;
	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);
	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;
	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);
	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;
	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
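
/*
 * QP transition helpers used when the rdma_cm owns the QP: step it through
 * INIT -> RTR -> RTS while connecting, or move it to ERR on teardown, with
 * attributes supplied by rdma_init_qp_attr().
 */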
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}
	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;
	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;
	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}
	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;
	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}
	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;
	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;
	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;
	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;
	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;
	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
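
/*
 * Copy addressing from an incoming request into a new id: AF_IB listeners
 * take it from the primary path record, IP listeners parse it out of the
 * cma_hdr carried in the request's private data.
 */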
static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;
	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->sgid, 16);
	ib->sib_sid = listen_ib->sib_sid;
	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	ib->sib_scope_id = listen_ib->sib_scope_id;
	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->dgid, 16);
}

static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in *listen4, *ip4;
	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
	ip4->sin_port = listen4->sin_port;
	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
	ip4->sin_port = hdr->port;
}

static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in6 *listen6, *ip6;
	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->dst_addr.ip6;
	ip6->sin6_port = listen6->sin6_port;
	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->src_addr.ip6;
	ip6->sin6_port = hdr->port;
}

static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event)
{
	struct cma_hdr *hdr;
	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
		return 0;
	}
	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;
	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(id, listen_id, hdr);
		break;
	case 6:
		cma_save_ip6_info(id, listen_id, hdr);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;
	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);
	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);
		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	if (!bind_list)
		return;
	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;
	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;
	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);
	/*
	 * Wait for any active callback to finish. New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);
	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}
	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);
	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);
	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;
	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;
	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;
	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;
	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
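
/*
 * IB CM callback for connected ids: translate IB CM events (REP, RTU, DREQ,
 * REJ, ...) into RDMA CM events. If the user's event handler returns
 * non-zero, the id is torn down here.
 */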
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;
	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	int ret;
	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;
	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;
	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;
	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
	if (cma_any_addr(cma_src_addr(id_priv))) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	int ret;
	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;
	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;
	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
		if (ret)
			goto err;
	}
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
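
/*
 * IB CM callback for listening ids: build a child id for the incoming REQ or
 * SIDR REQ, attach it to a device, and deliver RDMA_CM_EVENT_CONNECT_REQUEST
 * to the listener's handler.
 */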
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;
	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;
	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id);
	if (ret)
		goto err2;
	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;
	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;
err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;
	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;
	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		cma_set_ip_ver(cma_data, 4);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		cma_set_ip_ver(cma_data, 6);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
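
/*
 * iWARP CM callback for connected ids; the iWARP counterpart of
 * cma_ib_handler().
 */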
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;
	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}
	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
  1254. static int iw_conn_req_handler(struct iw_cm_id *cm_id,
  1255. struct iw_cm_event *iw_event)
  1256. {
  1257. struct rdma_cm_id *new_cm_id;
  1258. struct rdma_id_private *listen_id, *conn_id;
  1259. struct net_device *dev = NULL;
  1260. struct rdma_cm_event event;
  1261. int ret;
  1262. struct ib_device_attr attr;
  1263. struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
  1264. struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  1265. listen_id = cm_id->context;
  1266. if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
  1267. return -ECONNABORTED;
  1268. /* Create a new RDMA id for the new IW CM ID */
  1269. new_cm_id = rdma_create_id(listen_id->id.event_handler,
  1270. listen_id->id.context,
  1271. RDMA_PS_TCP, IB_QPT_RC);
  1272. if (IS_ERR(new_cm_id)) {
  1273. ret = -ENOMEM;
  1274. goto out;
  1275. }
  1276. conn_id = container_of(new_cm_id, struct rdma_id_private, id);
  1277. mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
  1278. conn_id->state = RDMA_CM_CONNECT;
  1279. ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
  1280. if (ret) {
  1281. mutex_unlock(&conn_id->handler_mutex);
  1282. rdma_destroy_id(new_cm_id);
  1283. goto out;
  1284. }
  1285. ret = cma_acquire_dev(conn_id);
  1286. if (ret) {
  1287. mutex_unlock(&conn_id->handler_mutex);
  1288. rdma_destroy_id(new_cm_id);
  1289. goto out;
  1290. }
  1291. conn_id->cm_id.iw = cm_id;
  1292. cm_id->context = conn_id;
  1293. cm_id->cm_handler = cma_iw_handler;
  1294. memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
  1295. memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
  1296. ret = ib_query_device(conn_id->id.device, &attr);
  1297. if (ret) {
  1298. mutex_unlock(&conn_id->handler_mutex);
  1299. rdma_destroy_id(new_cm_id);
  1300. goto out;
  1301. }
  1302. memset(&event, 0, sizeof event);
  1303. event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
  1304. event.param.conn.private_data = iw_event->private_data;
  1305. event.param.conn.private_data_len = iw_event->private_data_len;
  1306. event.param.conn.initiator_depth = iw_event->ird;
  1307. event.param.conn.responder_resources = iw_event->ord;
  1308. /*
  1309. * Protect against the user destroying conn_id from another thread
  1310. * until we're done accessing it.
  1311. */
  1312. atomic_inc(&conn_id->refcount);
  1313. ret = conn_id->id.event_handler(&conn_id->id, &event);
  1314. if (ret) {
  1315. /* User wants to destroy the CM ID */
  1316. conn_id->cm_id.iw = NULL;
  1317. cma_exch(conn_id, RDMA_CM_DESTROYING);
  1318. mutex_unlock(&conn_id->handler_mutex);
  1319. cma_deref_id(conn_id);
  1320. rdma_destroy_id(&conn_id->id);
  1321. goto out;
  1322. }
  1323. mutex_unlock(&conn_id->handler_mutex);
  1324. cma_deref_id(conn_id);
  1325. out:
1326. if (dev)	/* dev is never acquired above, so this remains a no-op */
1327. dev_put(dev);
  1328. mutex_unlock(&listen_id->handler_mutex);
  1329. return ret;
  1330. }
  1331. static int cma_ib_listen(struct rdma_id_private *id_priv)
  1332. {
  1333. struct ib_cm_compare_data compare_data;
  1334. struct sockaddr *addr;
  1335. struct ib_cm_id *id;
  1336. __be64 svc_id;
  1337. int ret;
  1338. id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
  1339. if (IS_ERR(id))
  1340. return PTR_ERR(id);
  1341. id_priv->cm_id.ib = id;
  1342. addr = cma_src_addr(id_priv);
  1343. svc_id = rdma_get_service_id(&id_priv->id, addr);
  1344. if (cma_any_addr(addr) && !id_priv->afonly)
  1345. ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
  1346. else {
  1347. cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
  1348. ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
  1349. }
  1350. if (ret) {
  1351. ib_destroy_cm_id(id_priv->cm_id.ib);
  1352. id_priv->cm_id.ib = NULL;
  1353. }
  1354. return ret;
  1355. }
  1356. static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
  1357. {
  1358. int ret;
  1359. struct iw_cm_id *id;
  1360. id = iw_create_cm_id(id_priv->id.device,
  1361. iw_conn_req_handler,
  1362. id_priv);
  1363. if (IS_ERR(id))
  1364. return PTR_ERR(id);
  1365. id_priv->cm_id.iw = id;
  1366. memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
  1367. rdma_addr_size(cma_src_addr(id_priv)));
  1368. ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
  1369. if (ret) {
  1370. iw_destroy_cm_id(id_priv->cm_id.iw);
  1371. id_priv->cm_id.iw = NULL;
  1372. }
  1373. return ret;
  1374. }
  1375. static int cma_listen_handler(struct rdma_cm_id *id,
  1376. struct rdma_cm_event *event)
  1377. {
  1378. struct rdma_id_private *id_priv = id->context;
  1379. id->context = id_priv->id.context;
  1380. id->event_handler = id_priv->id.event_handler;
  1381. return id_priv->id.event_handler(id, event);
  1382. }
  1383. static void cma_listen_on_dev(struct rdma_id_private *id_priv,
  1384. struct cma_device *cma_dev)
  1385. {
  1386. struct rdma_id_private *dev_id_priv;
  1387. struct rdma_cm_id *id;
  1388. int ret;
  1389. if (cma_family(id_priv) == AF_IB &&
  1390. rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
  1391. return;
  1392. id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
  1393. id_priv->id.qp_type);
  1394. if (IS_ERR(id))
  1395. return;
  1396. dev_id_priv = container_of(id, struct rdma_id_private, id);
  1397. dev_id_priv->state = RDMA_CM_ADDR_BOUND;
  1398. memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
  1399. rdma_addr_size(cma_src_addr(id_priv)));
  1400. cma_attach_to_dev(dev_id_priv, cma_dev);
  1401. list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
  1402. atomic_inc(&id_priv->refcount);
  1403. dev_id_priv->internal_id = 1;
  1404. dev_id_priv->afonly = id_priv->afonly;
  1405. ret = rdma_listen(id, id_priv->backlog);
  1406. if (ret)
  1407. printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
  1408. "listening on device %s\n", ret, cma_dev->device->name);
  1409. }
  1410. static void cma_listen_on_all(struct rdma_id_private *id_priv)
  1411. {
  1412. struct cma_device *cma_dev;
  1413. mutex_lock(&lock);
  1414. list_add_tail(&id_priv->list, &listen_any_list);
  1415. list_for_each_entry(cma_dev, &dev_list, list)
  1416. cma_listen_on_dev(id_priv, cma_dev);
  1417. mutex_unlock(&lock);
  1418. }
  1419. void rdma_set_service_type(struct rdma_cm_id *id, int tos)
  1420. {
  1421. struct rdma_id_private *id_priv;
  1422. id_priv = container_of(id, struct rdma_id_private, id);
  1423. id_priv->tos = (u8) tos;
  1424. }
  1425. EXPORT_SYMBOL(rdma_set_service_type);
  1426. static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
  1427. void *context)
  1428. {
  1429. struct cma_work *work = context;
  1430. struct rdma_route *route;
  1431. route = &work->id->id.route;
  1432. if (!status) {
  1433. route->num_paths = 1;
  1434. *route->path_rec = *path_rec;
  1435. } else {
  1436. work->old_state = RDMA_CM_ROUTE_QUERY;
  1437. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1438. work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
  1439. work->event.status = status;
  1440. }
  1441. queue_work(cma_wq, &work->work);
  1442. }
  1443. static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
  1444. struct cma_work *work)
  1445. {
  1446. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  1447. struct ib_sa_path_rec path_rec;
  1448. ib_sa_comp_mask comp_mask;
  1449. struct sockaddr_in6 *sin6;
  1450. struct sockaddr_ib *sib;
  1451. memset(&path_rec, 0, sizeof path_rec);
  1452. rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
  1453. rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
  1454. path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
  1455. path_rec.numb_path = 1;
  1456. path_rec.reversible = 1;
  1457. path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  1458. comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
  1459. IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
  1460. IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
  1461. switch (cma_family(id_priv)) {
  1462. case AF_INET:
  1463. path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
  1464. comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
  1465. break;
  1466. case AF_INET6:
  1467. sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  1468. path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
  1469. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  1470. break;
  1471. case AF_IB:
  1472. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  1473. path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
  1474. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  1475. break;
  1476. }
  1477. id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
  1478. id_priv->id.port_num, &path_rec,
  1479. comp_mask, timeout_ms,
  1480. GFP_KERNEL, cma_query_handler,
  1481. work, &id_priv->query);
  1482. return (id_priv->query_id < 0) ? id_priv->query_id : 0;
  1483. }
  1484. static void cma_work_handler(struct work_struct *_work)
  1485. {
  1486. struct cma_work *work = container_of(_work, struct cma_work, work);
  1487. struct rdma_id_private *id_priv = work->id;
  1488. int destroy = 0;
  1489. mutex_lock(&id_priv->handler_mutex);
  1490. if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
  1491. goto out;
  1492. if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
  1493. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1494. destroy = 1;
  1495. }
  1496. out:
  1497. mutex_unlock(&id_priv->handler_mutex);
  1498. cma_deref_id(id_priv);
  1499. if (destroy)
  1500. rdma_destroy_id(&id_priv->id);
  1501. kfree(work);
  1502. }
  1503. static void cma_ndev_work_handler(struct work_struct *_work)
  1504. {
  1505. struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
  1506. struct rdma_id_private *id_priv = work->id;
  1507. int destroy = 0;
  1508. mutex_lock(&id_priv->handler_mutex);
  1509. if (id_priv->state == RDMA_CM_DESTROYING ||
  1510. id_priv->state == RDMA_CM_DEVICE_REMOVAL)
  1511. goto out;
  1512. if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
  1513. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1514. destroy = 1;
  1515. }
  1516. out:
  1517. mutex_unlock(&id_priv->handler_mutex);
  1518. cma_deref_id(id_priv);
  1519. if (destroy)
  1520. rdma_destroy_id(&id_priv->id);
  1521. kfree(work);
  1522. }
  1523. static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
  1524. {
  1525. struct rdma_route *route = &id_priv->id.route;
  1526. struct cma_work *work;
  1527. int ret;
  1528. work = kzalloc(sizeof *work, GFP_KERNEL);
  1529. if (!work)
  1530. return -ENOMEM;
  1531. work->id = id_priv;
  1532. INIT_WORK(&work->work, cma_work_handler);
  1533. work->old_state = RDMA_CM_ROUTE_QUERY;
  1534. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  1535. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  1536. route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
  1537. if (!route->path_rec) {
  1538. ret = -ENOMEM;
  1539. goto err1;
  1540. }
  1541. ret = cma_query_ib_route(id_priv, timeout_ms, work);
  1542. if (ret)
  1543. goto err2;
  1544. return 0;
  1545. err2:
  1546. kfree(route->path_rec);
  1547. route->path_rec = NULL;
  1548. err1:
  1549. kfree(work);
  1550. return ret;
  1551. }
  1552. int rdma_set_ib_paths(struct rdma_cm_id *id,
  1553. struct ib_sa_path_rec *path_rec, int num_paths)
  1554. {
  1555. struct rdma_id_private *id_priv;
  1556. int ret;
  1557. id_priv = container_of(id, struct rdma_id_private, id);
  1558. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  1559. RDMA_CM_ROUTE_RESOLVED))
  1560. return -EINVAL;
  1561. id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
  1562. GFP_KERNEL);
  1563. if (!id->route.path_rec) {
  1564. ret = -ENOMEM;
  1565. goto err;
  1566. }
  1567. id->route.num_paths = num_paths;
  1568. return 0;
  1569. err:
  1570. cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
  1571. return ret;
  1572. }
  1573. EXPORT_SYMBOL(rdma_set_ib_paths);
  1574. static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
  1575. {
  1576. struct cma_work *work;
  1577. work = kzalloc(sizeof *work, GFP_KERNEL);
  1578. if (!work)
  1579. return -ENOMEM;
  1580. work->id = id_priv;
  1581. INIT_WORK(&work->work, cma_work_handler);
  1582. work->old_state = RDMA_CM_ROUTE_QUERY;
  1583. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  1584. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  1585. queue_work(cma_wq, &work->work);
  1586. return 0;
  1587. }
  1588. static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
  1589. {
  1590. struct rdma_route *route = &id_priv->id.route;
  1591. struct rdma_addr *addr = &route->addr;
  1592. struct cma_work *work;
  1593. int ret;
  1594. struct net_device *ndev = NULL;
  1595. u16 vid;
  1596. work = kzalloc(sizeof *work, GFP_KERNEL);
  1597. if (!work)
  1598. return -ENOMEM;
  1599. work->id = id_priv;
  1600. INIT_WORK(&work->work, cma_work_handler);
  1601. route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
  1602. if (!route->path_rec) {
  1603. ret = -ENOMEM;
  1604. goto err1;
  1605. }
  1606. route->num_paths = 1;
  1607. if (addr->dev_addr.bound_dev_if)
  1608. ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
  1609. if (!ndev) {
  1610. ret = -ENODEV;
  1611. goto err2;
  1612. }
  1613. vid = rdma_vlan_dev_vlan_id(ndev);
  1614. iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
  1615. iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
  1616. route->path_rec->hop_limit = 1;
  1617. route->path_rec->reversible = 1;
  1618. route->path_rec->pkey = cpu_to_be16(0xffff);
  1619. route->path_rec->mtu_selector = IB_SA_EQ;
  1620. route->path_rec->sl = netdev_get_prio_tc_map(
  1621. ndev->priv_flags & IFF_802_1Q_VLAN ?
  1622. vlan_dev_real_dev(ndev) : ndev,
  1623. rt_tos2priority(id_priv->tos));
  1624. route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
  1625. route->path_rec->rate_selector = IB_SA_EQ;
  1626. route->path_rec->rate = iboe_get_rate(ndev);
  1627. dev_put(ndev);
  1628. route->path_rec->packet_life_time_selector = IB_SA_EQ;
  1629. route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
  1630. if (!route->path_rec->mtu) {
  1631. ret = -EINVAL;
  1632. goto err2;
  1633. }
  1634. work->old_state = RDMA_CM_ROUTE_QUERY;
  1635. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  1636. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  1637. work->event.status = 0;
  1638. queue_work(cma_wq, &work->work);
  1639. return 0;
  1640. err2:
  1641. kfree(route->path_rec);
  1642. route->path_rec = NULL;
  1643. err1:
  1644. kfree(work);
  1645. return ret;
  1646. }
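/*
 * rdma_resolve_route() - pick the route-resolution method for the bound
 * device.  IB ports issue an SA path record query (cma_resolve_ib_route),
 * RoCE/IBoE ports construct the path locally from the netdev
 * (cma_resolve_iboe_route), and iWARP ports simply report completion
 * (cma_resolve_iw_route).  The result is delivered asynchronously as
 * RDMA_CM_EVENT_ROUTE_RESOLVED or RDMA_CM_EVENT_ROUTE_ERROR.
 */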
  1647. int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
  1648. {
  1649. struct rdma_id_private *id_priv;
  1650. int ret;
  1651. id_priv = container_of(id, struct rdma_id_private, id);
  1652. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
  1653. return -EINVAL;
  1654. atomic_inc(&id_priv->refcount);
  1655. switch (rdma_node_get_transport(id->device->node_type)) {
  1656. case RDMA_TRANSPORT_IB:
  1657. switch (rdma_port_get_link_layer(id->device, id->port_num)) {
  1658. case IB_LINK_LAYER_INFINIBAND:
  1659. ret = cma_resolve_ib_route(id_priv, timeout_ms);
  1660. break;
  1661. case IB_LINK_LAYER_ETHERNET:
  1662. ret = cma_resolve_iboe_route(id_priv);
  1663. break;
  1664. default:
  1665. ret = -ENOSYS;
  1666. }
  1667. break;
  1668. case RDMA_TRANSPORT_IWARP:
  1669. ret = cma_resolve_iw_route(id_priv, timeout_ms);
  1670. break;
  1671. default:
  1672. ret = -ENOSYS;
  1673. break;
  1674. }
  1675. if (ret)
  1676. goto err;
  1677. return 0;
  1678. err:
  1679. cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
  1680. cma_deref_id(id_priv);
  1681. return ret;
  1682. }
  1683. EXPORT_SYMBOL(rdma_resolve_route);
  1684. static void cma_set_loopback(struct sockaddr *addr)
  1685. {
  1686. switch (addr->sa_family) {
  1687. case AF_INET:
  1688. ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  1689. break;
  1690. case AF_INET6:
  1691. ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
  1692. 0, 0, 0, htonl(1));
  1693. break;
  1694. default:
  1695. ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
  1696. 0, 0, 0, htonl(1));
  1697. break;
  1698. }
  1699. }
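/*
 * Bind an unaddressed id to a local device for loopback: scan the device
 * list for the first port in the ACTIVE state (falling back to port 1 of
 * the first device), take that port's GID and pkey as the source, and
 * rewrite the source address to the loopback address of the id's family.
 */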
  1700. static int cma_bind_loopback(struct rdma_id_private *id_priv)
  1701. {
  1702. struct cma_device *cma_dev, *cur_dev;
  1703. struct ib_port_attr port_attr;
  1704. union ib_gid gid;
  1705. u16 pkey;
  1706. int ret;
  1707. u8 p;
  1708. cma_dev = NULL;
  1709. mutex_lock(&lock);
  1710. list_for_each_entry(cur_dev, &dev_list, list) {
  1711. if (cma_family(id_priv) == AF_IB &&
  1712. rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
  1713. continue;
  1714. if (!cma_dev)
  1715. cma_dev = cur_dev;
  1716. for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
  1717. if (!ib_query_port(cur_dev->device, p, &port_attr) &&
  1718. port_attr.state == IB_PORT_ACTIVE) {
  1719. cma_dev = cur_dev;
  1720. goto port_found;
  1721. }
  1722. }
  1723. }
  1724. if (!cma_dev) {
  1725. ret = -ENODEV;
  1726. goto out;
  1727. }
1728. p = 1;	/* no ACTIVE port found: fall back to port 1 of the first device */
  1729. port_found:
  1730. ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
  1731. if (ret)
  1732. goto out;
  1733. ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
  1734. if (ret)
  1735. goto out;
  1736. id_priv->id.route.addr.dev_addr.dev_type =
  1737. (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
  1738. ARPHRD_INFINIBAND : ARPHRD_ETHER;
  1739. rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  1740. ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
  1741. id_priv->id.port_num = p;
  1742. cma_attach_to_dev(id_priv, cma_dev);
  1743. cma_set_loopback(cma_src_addr(id_priv));
  1744. out:
  1745. mutex_unlock(&lock);
  1746. return ret;
  1747. }
  1748. static void addr_handler(int status, struct sockaddr *src_addr,
  1749. struct rdma_dev_addr *dev_addr, void *context)
  1750. {
  1751. struct rdma_id_private *id_priv = context;
  1752. struct rdma_cm_event event;
  1753. memset(&event, 0, sizeof event);
  1754. mutex_lock(&id_priv->handler_mutex);
  1755. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
  1756. RDMA_CM_ADDR_RESOLVED))
  1757. goto out;
  1758. if (!status && !id_priv->cma_dev)
  1759. status = cma_acquire_dev(id_priv);
  1760. if (status) {
  1761. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  1762. RDMA_CM_ADDR_BOUND))
  1763. goto out;
  1764. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  1765. event.status = status;
  1766. } else {
  1767. memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
  1768. event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1769. }
  1770. if (id_priv->id.event_handler(&id_priv->id, &event)) {
  1771. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1772. mutex_unlock(&id_priv->handler_mutex);
  1773. cma_deref_id(id_priv);
  1774. rdma_destroy_id(&id_priv->id);
  1775. return;
  1776. }
  1777. out:
  1778. mutex_unlock(&id_priv->handler_mutex);
  1779. cma_deref_id(id_priv);
  1780. }
  1781. static int cma_resolve_loopback(struct rdma_id_private *id_priv)
  1782. {
  1783. struct cma_work *work;
  1784. union ib_gid gid;
  1785. int ret;
  1786. work = kzalloc(sizeof *work, GFP_KERNEL);
  1787. if (!work)
  1788. return -ENOMEM;
  1789. if (!id_priv->cma_dev) {
  1790. ret = cma_bind_loopback(id_priv);
  1791. if (ret)
  1792. goto err;
  1793. }
  1794. rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  1795. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
  1796. work->id = id_priv;
  1797. INIT_WORK(&work->work, cma_work_handler);
  1798. work->old_state = RDMA_CM_ADDR_QUERY;
  1799. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1800. work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1801. queue_work(cma_wq, &work->work);
  1802. return 0;
  1803. err:
  1804. kfree(work);
  1805. return ret;
  1806. }
  1807. static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
  1808. {
  1809. struct cma_work *work;
  1810. int ret;
  1811. work = kzalloc(sizeof *work, GFP_KERNEL);
  1812. if (!work)
  1813. return -ENOMEM;
  1814. if (!id_priv->cma_dev) {
  1815. ret = cma_resolve_ib_dev(id_priv);
  1816. if (ret)
  1817. goto err;
  1818. }
  1819. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
  1820. &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
  1821. work->id = id_priv;
  1822. INIT_WORK(&work->work, cma_work_handler);
  1823. work->old_state = RDMA_CM_ADDR_QUERY;
  1824. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1825. work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1826. queue_work(cma_wq, &work->work);
  1827. return 0;
  1828. err:
  1829. kfree(work);
  1830. return ret;
  1831. }
  1832. static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
  1833. struct sockaddr *dst_addr)
  1834. {
  1835. if (!src_addr || !src_addr->sa_family) {
  1836. src_addr = (struct sockaddr *) &id->route.addr.src_addr;
  1837. src_addr->sa_family = dst_addr->sa_family;
  1838. if (dst_addr->sa_family == AF_INET6) {
  1839. ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
  1840. ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
  1841. } else if (dst_addr->sa_family == AF_IB) {
  1842. ((struct sockaddr_ib *) src_addr)->sib_pkey =
  1843. ((struct sockaddr_ib *) dst_addr)->sib_pkey;
  1844. }
  1845. }
  1846. return rdma_bind_addr(id, src_addr);
  1847. }
  1848. int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
  1849. struct sockaddr *dst_addr, int timeout_ms)
  1850. {
  1851. struct rdma_id_private *id_priv;
  1852. int ret;
  1853. id_priv = container_of(id, struct rdma_id_private, id);
  1854. if (id_priv->state == RDMA_CM_IDLE) {
  1855. ret = cma_bind_addr(id, src_addr, dst_addr);
  1856. if (ret)
  1857. return ret;
  1858. }
  1859. if (cma_family(id_priv) != dst_addr->sa_family)
  1860. return -EINVAL;
  1861. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
  1862. return -EINVAL;
  1863. atomic_inc(&id_priv->refcount);
  1864. memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
  1865. if (cma_any_addr(dst_addr)) {
  1866. ret = cma_resolve_loopback(id_priv);
  1867. } else {
  1868. if (dst_addr->sa_family == AF_IB) {
  1869. ret = cma_resolve_ib_addr(id_priv);
  1870. } else {
  1871. ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
  1872. dst_addr, &id->route.addr.dev_addr,
  1873. timeout_ms, addr_handler, id_priv);
  1874. }
  1875. }
  1876. if (ret)
  1877. goto err;
  1878. return 0;
  1879. err:
  1880. cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
  1881. cma_deref_id(id_priv);
  1882. return ret;
  1883. }
  1884. EXPORT_SYMBOL(rdma_resolve_addr);
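/*
 * Typical active-side call order for the address/route resolution API
 * above (sketch only; event dispatch and error handling omitted):
 *
 *	id = rdma_create_id(cm_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	  ... handler sees RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, 2000);
 *	  ... handler sees RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 *	  ... handler sees RDMA_CM_EVENT_ESTABLISHED ...
 */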
  1885. int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
  1886. {
  1887. struct rdma_id_private *id_priv;
  1888. unsigned long flags;
  1889. int ret;
  1890. id_priv = container_of(id, struct rdma_id_private, id);
  1891. spin_lock_irqsave(&id_priv->lock, flags);
  1892. if (reuse || id_priv->state == RDMA_CM_IDLE) {
  1893. id_priv->reuseaddr = reuse;
  1894. ret = 0;
  1895. } else {
  1896. ret = -EINVAL;
  1897. }
  1898. spin_unlock_irqrestore(&id_priv->lock, flags);
  1899. return ret;
  1900. }
  1901. EXPORT_SYMBOL(rdma_set_reuseaddr);
  1902. int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
  1903. {
  1904. struct rdma_id_private *id_priv;
  1905. unsigned long flags;
  1906. int ret;
  1907. id_priv = container_of(id, struct rdma_id_private, id);
  1908. spin_lock_irqsave(&id_priv->lock, flags);
  1909. if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
  1910. id_priv->options |= (1 << CMA_OPTION_AFONLY);
  1911. id_priv->afonly = afonly;
  1912. ret = 0;
  1913. } else {
  1914. ret = -EINVAL;
  1915. }
  1916. spin_unlock_irqrestore(&id_priv->lock, flags);
  1917. return ret;
  1918. }
  1919. EXPORT_SYMBOL(rdma_set_afonly);
  1920. static void cma_bind_port(struct rdma_bind_list *bind_list,
  1921. struct rdma_id_private *id_priv)
  1922. {
  1923. struct sockaddr *addr;
  1924. struct sockaddr_ib *sib;
  1925. u64 sid, mask;
  1926. __be16 port;
  1927. addr = cma_src_addr(id_priv);
  1928. port = htons(bind_list->port);
  1929. switch (addr->sa_family) {
  1930. case AF_INET:
  1931. ((struct sockaddr_in *) addr)->sin_port = port;
  1932. break;
  1933. case AF_INET6:
  1934. ((struct sockaddr_in6 *) addr)->sin6_port = port;
  1935. break;
  1936. case AF_IB:
  1937. sib = (struct sockaddr_ib *) addr;
  1938. sid = be64_to_cpu(sib->sib_sid);
  1939. mask = be64_to_cpu(sib->sib_sid_mask);
  1940. sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
  1941. sib->sib_sid_mask = cpu_to_be64(~0ULL);
  1942. break;
  1943. }
  1944. id_priv->bind_list = bind_list;
  1945. hlist_add_head(&id_priv->node, &bind_list->owners);
  1946. }
  1947. static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
  1948. unsigned short snum)
  1949. {
  1950. struct rdma_bind_list *bind_list;
  1951. int ret;
  1952. bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
  1953. if (!bind_list)
  1954. return -ENOMEM;
  1955. ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
  1956. if (ret < 0)
  1957. goto err;
  1958. bind_list->ps = ps;
  1959. bind_list->port = (unsigned short)ret;
  1960. cma_bind_port(bind_list, id_priv);
  1961. return 0;
  1962. err:
  1963. kfree(bind_list);
  1964. return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
  1965. }
  1966. static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
  1967. {
  1968. static unsigned int last_used_port;
  1969. int low, high, remaining;
  1970. unsigned int rover;
  1971. inet_get_local_port_range(&low, &high);
  1972. remaining = (high - low) + 1;
  1973. rover = net_random() % remaining + low;
  1974. retry:
  1975. if (last_used_port != rover &&
  1976. !idr_find(ps, (unsigned short) rover)) {
  1977. int ret = cma_alloc_port(ps, id_priv, rover);
  1978. /*
  1979. * Remember previously used port number in order to avoid
  1980. * re-using same port immediately after it is closed.
  1981. */
  1982. if (!ret)
  1983. last_used_port = rover;
  1984. if (ret != -EADDRNOTAVAIL)
  1985. return ret;
  1986. }
  1987. if (--remaining) {
  1988. rover++;
  1989. if ((rover < low) || (rover > high))
  1990. rover = low;
  1991. goto retry;
  1992. }
  1993. return -EADDRNOTAVAIL;
  1994. }
  1995. /*
  1996. * Check that the requested port is available. This is called when trying to
  1997. * bind to a specific port, or when trying to listen on a bound port. In
  1998. * the latter case, the provided id_priv may already be on the bind_list, but
  1999. * we still need to check that it's okay to start listening.
  2000. */
  2001. static int cma_check_port(struct rdma_bind_list *bind_list,
  2002. struct rdma_id_private *id_priv, uint8_t reuseaddr)
  2003. {
  2004. struct rdma_id_private *cur_id;
  2005. struct sockaddr *addr, *cur_addr;
  2006. addr = cma_src_addr(id_priv);
  2007. hlist_for_each_entry(cur_id, &bind_list->owners, node) {
  2008. if (id_priv == cur_id)
  2009. continue;
  2010. if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
  2011. cur_id->reuseaddr)
  2012. continue;
  2013. cur_addr = cma_src_addr(cur_id);
  2014. if (id_priv->afonly && cur_id->afonly &&
  2015. (addr->sa_family != cur_addr->sa_family))
  2016. continue;
  2017. if (cma_any_addr(addr) || cma_any_addr(cur_addr))
  2018. return -EADDRNOTAVAIL;
  2019. if (!cma_addr_cmp(addr, cur_addr))
  2020. return -EADDRINUSE;
  2021. }
  2022. return 0;
  2023. }
  2024. static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
  2025. {
  2026. struct rdma_bind_list *bind_list;
  2027. unsigned short snum;
  2028. int ret;
  2029. snum = ntohs(cma_port(cma_src_addr(id_priv)));
  2030. if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
  2031. return -EACCES;
  2032. bind_list = idr_find(ps, snum);
  2033. if (!bind_list) {
  2034. ret = cma_alloc_port(ps, id_priv, snum);
  2035. } else {
  2036. ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
  2037. if (!ret)
  2038. cma_bind_port(bind_list, id_priv);
  2039. }
  2040. return ret;
  2041. }
  2042. static int cma_bind_listen(struct rdma_id_private *id_priv)
  2043. {
  2044. struct rdma_bind_list *bind_list = id_priv->bind_list;
  2045. int ret = 0;
  2046. mutex_lock(&lock);
2047. if (bind_list->owners.first->next)	/* the port is shared with at least one other id */
  2048. ret = cma_check_port(bind_list, id_priv, 0);
  2049. mutex_unlock(&lock);
  2050. return ret;
  2051. }
  2052. static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
  2053. {
  2054. switch (id_priv->id.ps) {
  2055. case RDMA_PS_TCP:
  2056. return &tcp_ps;
  2057. case RDMA_PS_UDP:
  2058. return &udp_ps;
  2059. case RDMA_PS_IPOIB:
  2060. return &ipoib_ps;
  2061. case RDMA_PS_IB:
  2062. return &ib_ps;
  2063. default:
  2064. return NULL;
  2065. }
  2066. }
  2067. static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
  2068. {
  2069. struct idr *ps = NULL;
  2070. struct sockaddr_ib *sib;
  2071. u64 sid_ps, mask, sid;
  2072. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  2073. mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
  2074. sid = be64_to_cpu(sib->sib_sid) & mask;
  2075. if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
  2076. sid_ps = RDMA_IB_IP_PS_IB;
  2077. ps = &ib_ps;
  2078. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
  2079. (sid == (RDMA_IB_IP_PS_TCP & mask))) {
  2080. sid_ps = RDMA_IB_IP_PS_TCP;
  2081. ps = &tcp_ps;
  2082. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
  2083. (sid == (RDMA_IB_IP_PS_UDP & mask))) {
  2084. sid_ps = RDMA_IB_IP_PS_UDP;
  2085. ps = &udp_ps;
  2086. }
  2087. if (ps) {
  2088. sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
  2089. sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
  2090. be64_to_cpu(sib->sib_sid_mask));
  2091. }
  2092. return ps;
  2093. }
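/*
 * Select the port-space idr for the id (the IP port spaces map directly;
 * AF_IB encodes the port space in the service ID and its mask) and then
 * either grab an ephemeral port or claim the specific port requested.
 */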
  2094. static int cma_get_port(struct rdma_id_private *id_priv)
  2095. {
  2096. struct idr *ps;
  2097. int ret;
  2098. if (cma_family(id_priv) != AF_IB)
  2099. ps = cma_select_inet_ps(id_priv);
  2100. else
  2101. ps = cma_select_ib_ps(id_priv);
  2102. if (!ps)
  2103. return -EPROTONOSUPPORT;
  2104. mutex_lock(&lock);
  2105. if (cma_any_port(cma_src_addr(id_priv)))
  2106. ret = cma_alloc_any_port(ps, id_priv);
  2107. else
  2108. ret = cma_use_port(ps, id_priv);
  2109. mutex_unlock(&lock);
  2110. return ret;
  2111. }
  2112. static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
  2113. struct sockaddr *addr)
  2114. {
  2115. #if IS_ENABLED(CONFIG_IPV6)
  2116. struct sockaddr_in6 *sin6;
  2117. if (addr->sa_family != AF_INET6)
  2118. return 0;
  2119. sin6 = (struct sockaddr_in6 *) addr;
  2120. if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
  2121. !sin6->sin6_scope_id)
  2122. return -EINVAL;
  2123. dev_addr->bound_dev_if = sin6->sin6_scope_id;
  2124. #endif
  2125. return 0;
  2126. }
  2127. int rdma_listen(struct rdma_cm_id *id, int backlog)
  2128. {
  2129. struct rdma_id_private *id_priv;
  2130. int ret;
  2131. id_priv = container_of(id, struct rdma_id_private, id);
  2132. if (id_priv->state == RDMA_CM_IDLE) {
2133. id->route.addr.src_addr.ss_family = AF_INET;	/* listen on an unbound id: default to a wildcard IPv4 bind */
  2134. ret = rdma_bind_addr(id, cma_src_addr(id_priv));
  2135. if (ret)
  2136. return ret;
  2137. }
  2138. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
  2139. return -EINVAL;
  2140. if (id_priv->reuseaddr) {
  2141. ret = cma_bind_listen(id_priv);
  2142. if (ret)
  2143. goto err;
  2144. }
  2145. id_priv->backlog = backlog;
  2146. if (id->device) {
  2147. switch (rdma_node_get_transport(id->device->node_type)) {
  2148. case RDMA_TRANSPORT_IB:
  2149. ret = cma_ib_listen(id_priv);
  2150. if (ret)
  2151. goto err;
  2152. break;
  2153. case RDMA_TRANSPORT_IWARP:
  2154. ret = cma_iw_listen(id_priv, backlog);
  2155. if (ret)
  2156. goto err;
  2157. break;
  2158. default:
  2159. ret = -ENOSYS;
  2160. goto err;
  2161. }
  2162. } else
  2163. cma_listen_on_all(id_priv);
  2164. return 0;
  2165. err:
  2166. id_priv->backlog = 0;
  2167. cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
  2168. return ret;
  2169. }
  2170. EXPORT_SYMBOL(rdma_listen);
  2171. int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
  2172. {
  2173. struct rdma_id_private *id_priv;
  2174. int ret;
  2175. if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
  2176. addr->sa_family != AF_IB)
  2177. return -EAFNOSUPPORT;
  2178. id_priv = container_of(id, struct rdma_id_private, id);
  2179. if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
  2180. return -EINVAL;
  2181. ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
  2182. if (ret)
  2183. goto err1;
  2184. if (!cma_any_addr(addr)) {
  2185. ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
  2186. if (ret)
  2187. goto err1;
  2188. ret = cma_acquire_dev(id_priv);
  2189. if (ret)
  2190. goto err1;
  2191. }
  2192. memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
  2193. if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
  2194. if (addr->sa_family == AF_INET)
  2195. id_priv->afonly = 1;
  2196. #if IS_ENABLED(CONFIG_IPV6)
  2197. else if (addr->sa_family == AF_INET6)
  2198. id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
  2199. #endif
  2200. }
  2201. ret = cma_get_port(id_priv);
  2202. if (ret)
  2203. goto err2;
  2204. return 0;
  2205. err2:
  2206. if (id_priv->cma_dev)
  2207. cma_release_dev(id_priv);
  2208. err1:
  2209. cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
  2210. return ret;
  2211. }
  2212. EXPORT_SYMBOL(rdma_bind_addr);
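/*
 * Typical passive-side call order (sketch only; a server binds, listens,
 * and accepts each incoming request from its event handler):
 *
 *	id = rdma_create_id(server_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_bind_addr(id, (struct sockaddr *)&listen_addr);
 *	rdma_listen(id, backlog);
 *	  ... handler sees RDMA_CM_EVENT_CONNECT_REQUEST on a new id,
 *	      creates a QP for it and calls rdma_accept() ...
 */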
  2213. static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
  2214. {
  2215. struct cma_hdr *cma_hdr;
  2216. cma_hdr = hdr;
  2217. cma_hdr->cma_version = CMA_VERSION;
  2218. if (cma_family(id_priv) == AF_INET) {
  2219. struct sockaddr_in *src4, *dst4;
  2220. src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
  2221. dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
  2222. cma_set_ip_ver(cma_hdr, 4);
  2223. cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
  2224. cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
  2225. cma_hdr->port = src4->sin_port;
  2226. } else if (cma_family(id_priv) == AF_INET6) {
  2227. struct sockaddr_in6 *src6, *dst6;
  2228. src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  2229. dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
  2230. cma_set_ip_ver(cma_hdr, 6);
  2231. cma_hdr->src_addr.ip6 = src6->sin6_addr;
  2232. cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
  2233. cma_hdr->port = src6->sin6_port;
  2234. }
  2235. return 0;
  2236. }
  2237. static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
  2238. struct ib_cm_event *ib_event)
  2239. {
  2240. struct rdma_id_private *id_priv = cm_id->context;
  2241. struct rdma_cm_event event;
  2242. struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
  2243. int ret = 0;
  2244. if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
  2245. return 0;
  2246. memset(&event, 0, sizeof event);
  2247. switch (ib_event->event) {
  2248. case IB_CM_SIDR_REQ_ERROR:
  2249. event.event = RDMA_CM_EVENT_UNREACHABLE;
  2250. event.status = -ETIMEDOUT;
  2251. break;
  2252. case IB_CM_SIDR_REP_RECEIVED:
  2253. event.param.ud.private_data = ib_event->private_data;
  2254. event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
  2255. if (rep->status != IB_SIDR_SUCCESS) {
  2256. event.event = RDMA_CM_EVENT_UNREACHABLE;
  2257. event.status = ib_event->param.sidr_rep_rcvd.status;
  2258. break;
  2259. }
  2260. ret = cma_set_qkey(id_priv, rep->qkey);
  2261. if (ret) {
  2262. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  2263. event.status = ret;
  2264. break;
  2265. }
  2266. ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
  2267. id_priv->id.route.path_rec,
  2268. &event.param.ud.ah_attr);
  2269. event.param.ud.qp_num = rep->qpn;
  2270. event.param.ud.qkey = rep->qkey;
  2271. event.event = RDMA_CM_EVENT_ESTABLISHED;
  2272. event.status = 0;
  2273. break;
  2274. default:
  2275. printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
  2276. ib_event->event);
  2277. goto out;
  2278. }
  2279. ret = id_priv->id.event_handler(&id_priv->id, &event);
  2280. if (ret) {
  2281. /* Destroy the CM ID by returning a non-zero value. */
  2282. id_priv->cm_id.ib = NULL;
  2283. cma_exch(id_priv, RDMA_CM_DESTROYING);
  2284. mutex_unlock(&id_priv->handler_mutex);
  2285. rdma_destroy_id(&id_priv->id);
  2286. return ret;
  2287. }
  2288. out:
  2289. mutex_unlock(&id_priv->handler_mutex);
  2290. return ret;
  2291. }
  2292. static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
  2293. struct rdma_conn_param *conn_param)
  2294. {
  2295. struct ib_cm_sidr_req_param req;
  2296. struct ib_cm_id *id;
  2297. void *private_data;
  2298. int offset, ret;
  2299. memset(&req, 0, sizeof req);
  2300. offset = cma_user_data_offset(id_priv);
  2301. req.private_data_len = offset + conn_param->private_data_len;
  2302. if (req.private_data_len < conn_param->private_data_len)
  2303. return -EINVAL;
  2304. if (req.private_data_len) {
  2305. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  2306. if (!private_data)
  2307. return -ENOMEM;
  2308. } else {
  2309. private_data = NULL;
  2310. }
  2311. if (conn_param->private_data && conn_param->private_data_len)
  2312. memcpy(private_data + offset, conn_param->private_data,
  2313. conn_param->private_data_len);
  2314. if (private_data) {
  2315. ret = cma_format_hdr(private_data, id_priv);
  2316. if (ret)
  2317. goto out;
  2318. req.private_data = private_data;
  2319. }
  2320. id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
  2321. id_priv);
  2322. if (IS_ERR(id)) {
  2323. ret = PTR_ERR(id);
  2324. goto out;
  2325. }
  2326. id_priv->cm_id.ib = id;
  2327. req.path = id_priv->id.route.path_rec;
  2328. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  2329. req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
  2330. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  2331. ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
  2332. if (ret) {
  2333. ib_destroy_cm_id(id_priv->cm_id.ib);
  2334. id_priv->cm_id.ib = NULL;
  2335. }
  2336. out:
  2337. kfree(private_data);
  2338. return ret;
  2339. }
  2340. static int cma_connect_ib(struct rdma_id_private *id_priv,
  2341. struct rdma_conn_param *conn_param)
  2342. {
  2343. struct ib_cm_req_param req;
  2344. struct rdma_route *route;
  2345. void *private_data;
  2346. struct ib_cm_id *id;
  2347. int offset, ret;
  2348. memset(&req, 0, sizeof req);
  2349. offset = cma_user_data_offset(id_priv);
  2350. req.private_data_len = offset + conn_param->private_data_len;
  2351. if (req.private_data_len < conn_param->private_data_len)
  2352. return -EINVAL;
  2353. if (req.private_data_len) {
  2354. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  2355. if (!private_data)
  2356. return -ENOMEM;
  2357. } else {
  2358. private_data = NULL;
  2359. }
  2360. if (conn_param->private_data && conn_param->private_data_len)
  2361. memcpy(private_data + offset, conn_param->private_data,
  2362. conn_param->private_data_len);
  2363. id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
  2364. if (IS_ERR(id)) {
  2365. ret = PTR_ERR(id);
  2366. goto out;
  2367. }
  2368. id_priv->cm_id.ib = id;
  2369. route = &id_priv->id.route;
  2370. if (private_data) {
  2371. ret = cma_format_hdr(private_data, id_priv);
  2372. if (ret)
  2373. goto out;
  2374. req.private_data = private_data;
  2375. }
  2376. req.primary_path = &route->path_rec[0];
  2377. if (route->num_paths == 2)
  2378. req.alternate_path = &route->path_rec[1];
  2379. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  2380. req.qp_num = id_priv->qp_num;
  2381. req.qp_type = id_priv->id.qp_type;
  2382. req.starting_psn = id_priv->seq_num;
  2383. req.responder_resources = conn_param->responder_resources;
  2384. req.initiator_depth = conn_param->initiator_depth;
  2385. req.flow_control = conn_param->flow_control;
  2386. req.retry_count = min_t(u8, 7, conn_param->retry_count);
  2387. req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  2388. req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  2389. req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  2390. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  2391. req.srq = id_priv->srq ? 1 : 0;
  2392. ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
  2393. out:
  2394. if (ret && !IS_ERR(id)) {
  2395. ib_destroy_cm_id(id);
  2396. id_priv->cm_id.ib = NULL;
  2397. }
  2398. kfree(private_data);
  2399. return ret;
  2400. }
  2401. static int cma_connect_iw(struct rdma_id_private *id_priv,
  2402. struct rdma_conn_param *conn_param)
  2403. {
  2404. struct iw_cm_id *cm_id;
  2405. int ret;
  2406. struct iw_cm_conn_param iw_param;
  2407. cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
  2408. if (IS_ERR(cm_id))
  2409. return PTR_ERR(cm_id);
  2410. id_priv->cm_id.iw = cm_id;
  2411. memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
  2412. rdma_addr_size(cma_src_addr(id_priv)));
  2413. memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
  2414. rdma_addr_size(cma_dst_addr(id_priv)));
  2415. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2416. if (ret)
  2417. goto out;
  2418. if (conn_param) {
  2419. iw_param.ord = conn_param->initiator_depth;
  2420. iw_param.ird = conn_param->responder_resources;
  2421. iw_param.private_data = conn_param->private_data;
  2422. iw_param.private_data_len = conn_param->private_data_len;
  2423. iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
  2424. } else {
  2425. memset(&iw_param, 0, sizeof iw_param);
  2426. iw_param.qpn = id_priv->qp_num;
  2427. }
  2428. ret = iw_cm_connect(cm_id, &iw_param);
  2429. out:
  2430. if (ret) {
  2431. iw_destroy_cm_id(cm_id);
  2432. id_priv->cm_id.iw = NULL;
  2433. }
  2434. return ret;
  2435. }
  2436. int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  2437. {
  2438. struct rdma_id_private *id_priv;
  2439. int ret;
  2440. id_priv = container_of(id, struct rdma_id_private, id);
  2441. if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
  2442. return -EINVAL;
  2443. if (!id->qp) {
  2444. id_priv->qp_num = conn_param->qp_num;
  2445. id_priv->srq = conn_param->srq;
  2446. }
  2447. switch (rdma_node_get_transport(id->device->node_type)) {
  2448. case RDMA_TRANSPORT_IB:
  2449. if (id->qp_type == IB_QPT_UD)
  2450. ret = cma_resolve_ib_udp(id_priv, conn_param);
  2451. else
  2452. ret = cma_connect_ib(id_priv, conn_param);
  2453. break;
  2454. case RDMA_TRANSPORT_IWARP:
  2455. ret = cma_connect_iw(id_priv, conn_param);
  2456. break;
  2457. default:
  2458. ret = -ENOSYS;
  2459. break;
  2460. }
  2461. if (ret)
  2462. goto err;
  2463. return 0;
  2464. err:
  2465. cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
  2466. return ret;
  2467. }
  2468. EXPORT_SYMBOL(rdma_connect);
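/*
 * Example of filling struct rdma_conn_param for an RC connect (sketch;
 * the values and the hello_msg buffer are illustrative, not requirements):
 *
 *	struct rdma_conn_param conn_param = {
 *		.responder_resources	= 1,
 *		.initiator_depth	= 1,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.private_data		= hello_msg,
 *		.private_data_len	= sizeof(hello_msg),
 *	};
 *	rdma_connect(id, &conn_param);
 */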
  2469. static int cma_accept_ib(struct rdma_id_private *id_priv,
  2470. struct rdma_conn_param *conn_param)
  2471. {
  2472. struct ib_cm_rep_param rep;
  2473. int ret;
  2474. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2475. if (ret)
  2476. goto out;
  2477. ret = cma_modify_qp_rts(id_priv, conn_param);
  2478. if (ret)
  2479. goto out;
  2480. memset(&rep, 0, sizeof rep);
  2481. rep.qp_num = id_priv->qp_num;
  2482. rep.starting_psn = id_priv->seq_num;
  2483. rep.private_data = conn_param->private_data;
  2484. rep.private_data_len = conn_param->private_data_len;
  2485. rep.responder_resources = conn_param->responder_resources;
  2486. rep.initiator_depth = conn_param->initiator_depth;
  2487. rep.failover_accepted = 0;
  2488. rep.flow_control = conn_param->flow_control;
  2489. rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  2490. rep.srq = id_priv->srq ? 1 : 0;
  2491. ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
  2492. out:
  2493. return ret;
  2494. }
  2495. static int cma_accept_iw(struct rdma_id_private *id_priv,
  2496. struct rdma_conn_param *conn_param)
  2497. {
  2498. struct iw_cm_conn_param iw_param;
  2499. int ret;
  2500. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2501. if (ret)
  2502. return ret;
  2503. iw_param.ord = conn_param->initiator_depth;
  2504. iw_param.ird = conn_param->responder_resources;
  2505. iw_param.private_data = conn_param->private_data;
  2506. iw_param.private_data_len = conn_param->private_data_len;
  2507. if (id_priv->id.qp) {
  2508. iw_param.qpn = id_priv->qp_num;
  2509. } else
  2510. iw_param.qpn = conn_param->qp_num;
  2511. return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
  2512. }
  2513. static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
  2514. enum ib_cm_sidr_status status, u32 qkey,
  2515. const void *private_data, int private_data_len)
  2516. {
  2517. struct ib_cm_sidr_rep_param rep;
  2518. int ret;
  2519. memset(&rep, 0, sizeof rep);
  2520. rep.status = status;
  2521. if (status == IB_SIDR_SUCCESS) {
  2522. ret = cma_set_qkey(id_priv, qkey);
  2523. if (ret)
  2524. return ret;
  2525. rep.qp_num = id_priv->qp_num;
  2526. rep.qkey = id_priv->qkey;
  2527. }
  2528. rep.private_data = private_data;
  2529. rep.private_data_len = private_data_len;
  2530. return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
  2531. }
  2532. int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  2533. {
  2534. struct rdma_id_private *id_priv;
  2535. int ret;
  2536. id_priv = container_of(id, struct rdma_id_private, id);
  2537. id_priv->owner = task_pid_nr(current);
  2538. if (!cma_comp(id_priv, RDMA_CM_CONNECT))
  2539. return -EINVAL;
  2540. if (!id->qp && conn_param) {
  2541. id_priv->qp_num = conn_param->qp_num;
  2542. id_priv->srq = conn_param->srq;
  2543. }
  2544. switch (rdma_node_get_transport(id->device->node_type)) {
  2545. case RDMA_TRANSPORT_IB:
  2546. if (id->qp_type == IB_QPT_UD) {
  2547. if (conn_param)
  2548. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  2549. conn_param->qkey,
  2550. conn_param->private_data,
  2551. conn_param->private_data_len);
  2552. else
  2553. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  2554. 0, NULL, 0);
  2555. } else {
  2556. if (conn_param)
  2557. ret = cma_accept_ib(id_priv, conn_param);
  2558. else
  2559. ret = cma_rep_recv(id_priv);
  2560. }
  2561. break;
  2562. case RDMA_TRANSPORT_IWARP:
  2563. ret = cma_accept_iw(id_priv, conn_param);
  2564. break;
  2565. default:
  2566. ret = -ENOSYS;
  2567. break;
  2568. }
  2569. if (ret)
  2570. goto reject;
  2571. return 0;
  2572. reject:
  2573. cma_modify_qp_err(id_priv);
  2574. rdma_reject(id, NULL, 0);
  2575. return ret;
  2576. }
  2577. EXPORT_SYMBOL(rdma_accept);
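/*
 * rdma_accept() is normally called from the listener's event handler for
 * RDMA_CM_EVENT_CONNECT_REQUEST, on the new id carried by that event.
 * For RC connections the responder_resources/initiator_depth passed back
 * should normally not exceed the values reported in event->param.conn;
 * for UD (SIDR) a NULL conn_param simply replies with the id's QP number
 * and its default qkey.
 */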
  2578. int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
  2579. {
  2580. struct rdma_id_private *id_priv;
  2581. int ret;
  2582. id_priv = container_of(id, struct rdma_id_private, id);
  2583. if (!id_priv->cm_id.ib)
  2584. return -EINVAL;
  2585. switch (id->device->node_type) {
  2586. case RDMA_NODE_IB_CA:
  2587. ret = ib_cm_notify(id_priv->cm_id.ib, event);
  2588. break;
  2589. default:
  2590. ret = 0;
  2591. break;
  2592. }
  2593. return ret;
  2594. }
  2595. EXPORT_SYMBOL(rdma_notify);
  2596. int rdma_reject(struct rdma_cm_id *id, const void *private_data,
  2597. u8 private_data_len)
  2598. {
  2599. struct rdma_id_private *id_priv;
  2600. int ret;
  2601. id_priv = container_of(id, struct rdma_id_private, id);
  2602. if (!id_priv->cm_id.ib)
  2603. return -EINVAL;
  2604. switch (rdma_node_get_transport(id->device->node_type)) {
  2605. case RDMA_TRANSPORT_IB:
  2606. if (id->qp_type == IB_QPT_UD)
  2607. ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
  2608. private_data, private_data_len);
  2609. else
  2610. ret = ib_send_cm_rej(id_priv->cm_id.ib,
  2611. IB_CM_REJ_CONSUMER_DEFINED, NULL,
  2612. 0, private_data, private_data_len);
  2613. break;
  2614. case RDMA_TRANSPORT_IWARP:
  2615. ret = iw_cm_reject(id_priv->cm_id.iw,
  2616. private_data, private_data_len);
  2617. break;
  2618. default:
  2619. ret = -ENOSYS;
  2620. break;
  2621. }
  2622. return ret;
  2623. }
  2624. EXPORT_SYMBOL(rdma_reject);
  2625. int rdma_disconnect(struct rdma_cm_id *id)
  2626. {
  2627. struct rdma_id_private *id_priv;
  2628. int ret;
  2629. id_priv = container_of(id, struct rdma_id_private, id);
  2630. if (!id_priv->cm_id.ib)
  2631. return -EINVAL;
  2632. switch (rdma_node_get_transport(id->device->node_type)) {
  2633. case RDMA_TRANSPORT_IB:
  2634. ret = cma_modify_qp_err(id_priv);
  2635. if (ret)
  2636. goto out;
  2637. /* Initiate or respond to a disconnect. */
  2638. if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
  2639. ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
  2640. break;
  2641. case RDMA_TRANSPORT_IWARP:
  2642. ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
  2643. break;
  2644. default:
  2645. ret = -EINVAL;
  2646. break;
  2647. }
  2648. out:
  2649. return ret;
  2650. }
  2651. EXPORT_SYMBOL(rdma_disconnect);
  2652. static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
  2653. {
  2654. struct rdma_id_private *id_priv;
  2655. struct cma_multicast *mc = multicast->context;
  2656. struct rdma_cm_event event;
  2657. int ret;
  2658. id_priv = mc->id_priv;
  2659. if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
  2660. cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
  2661. return 0;
  2662. if (!status)
  2663. status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
  2664. mutex_lock(&id_priv->qp_mutex);
  2665. if (!status && id_priv->id.qp)
  2666. status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
  2667. be16_to_cpu(multicast->rec.mlid));
  2668. mutex_unlock(&id_priv->qp_mutex);
  2669. memset(&event, 0, sizeof event);
  2670. event.status = status;
  2671. event.param.ud.private_data = mc->context;
  2672. if (!status) {
  2673. event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
  2674. ib_init_ah_from_mcmember(id_priv->id.device,
  2675. id_priv->id.port_num, &multicast->rec,
  2676. &event.param.ud.ah_attr);
  2677. event.param.ud.qp_num = 0xFFFFFF;
  2678. event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
  2679. } else
  2680. event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
  2681. ret = id_priv->id.event_handler(&id_priv->id, &event);
  2682. if (ret) {
  2683. cma_exch(id_priv, RDMA_CM_DESTROYING);
  2684. mutex_unlock(&id_priv->handler_mutex);
  2685. rdma_destroy_id(&id_priv->id);
  2686. return 0;
  2687. }
  2688. mutex_unlock(&id_priv->handler_mutex);
  2689. return 0;
  2690. }
  2691. static void cma_set_mgid(struct rdma_id_private *id_priv,
  2692. struct sockaddr *addr, union ib_gid *mgid)
  2693. {
  2694. unsigned char mc_map[MAX_ADDR_LEN];
  2695. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  2696. struct sockaddr_in *sin = (struct sockaddr_in *) addr;
  2697. struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
  2698. if (cma_any_addr(addr)) {
  2699. memset(mgid, 0, sizeof *mgid);
  2700. } else if ((addr->sa_family == AF_INET6) &&
  2701. ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
  2702. 0xFF10A01B)) {
  2703. /* IPv6 address is an SA assigned MGID. */
  2704. memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
  2705. } else if (addr->sa_family == AF_IB) {
  2706. memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
  2707. } else if ((addr->sa_family == AF_INET6)) {
  2708. ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
  2709. if (id_priv->id.ps == RDMA_PS_UDP)
  2710. mc_map[7] = 0x01; /* Use RDMA CM signature */
  2711. *mgid = *(union ib_gid *) (mc_map + 4);
  2712. } else {
  2713. ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
  2714. if (id_priv->id.ps == RDMA_PS_UDP)
  2715. mc_map[7] = 0x01; /* Use RDMA CM signature */
  2716. *mgid = *(union ib_gid *) (mc_map + 4);
  2717. }
  2718. }
  2719. static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
  2720. struct cma_multicast *mc)
  2721. {
  2722. struct ib_sa_mcmember_rec rec;
  2723. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  2724. ib_sa_comp_mask comp_mask;
  2725. int ret;
  2726. ib_addr_get_mgid(dev_addr, &rec.mgid);
  2727. ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
  2728. &rec.mgid, &rec);
  2729. if (ret)
  2730. return ret;
  2731. ret = cma_set_qkey(id_priv, 0);
  2732. if (ret)
  2733. return ret;
  2734. cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
  2735. rec.qkey = cpu_to_be32(id_priv->qkey);
  2736. rdma_addr_get_sgid(dev_addr, &rec.port_gid);
  2737. rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
  2738. rec.join_state = 1;
  2739. comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
  2740. IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
  2741. IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
  2742. IB_SA_MCMEMBER_REC_FLOW_LABEL |
  2743. IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
  2744. if (id_priv->id.ps == RDMA_PS_IPOIB)
  2745. comp_mask |= IB_SA_MCMEMBER_REC_RATE |
  2746. IB_SA_MCMEMBER_REC_RATE_SELECTOR |
  2747. IB_SA_MCMEMBER_REC_MTU_SELECTOR |
  2748. IB_SA_MCMEMBER_REC_MTU |
  2749. IB_SA_MCMEMBER_REC_HOP_LIMIT;
  2750. mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
  2751. id_priv->id.port_num, &rec,
  2752. comp_mask, GFP_KERNEL,
  2753. cma_ib_mc_handler, mc);
  2754. return PTR_ERR_OR_ZERO(mc->multicast.ib);
  2755. }
  2756. static void iboe_mcast_work_handler(struct work_struct *work)
  2757. {
  2758. struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
  2759. struct cma_multicast *mc = mw->mc;
  2760. struct ib_sa_multicast *m = mc->multicast.ib;
  2761. mc->multicast.ib->context = mc;
  2762. cma_ib_mc_handler(0, m);
  2763. kref_put(&mc->mcref, release_mc);
  2764. kfree(mw);
  2765. }
  2766. static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
  2767. {
  2768. struct sockaddr_in *sin = (struct sockaddr_in *)addr;
  2769. struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
  2770. if (cma_any_addr(addr)) {
  2771. memset(mgid, 0, sizeof *mgid);
  2772. } else if (addr->sa_family == AF_INET6) {
  2773. memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
  2774. } else {
  2775. mgid->raw[0] = 0xff;
  2776. mgid->raw[1] = 0x0e;
  2777. mgid->raw[2] = 0;
  2778. mgid->raw[3] = 0;
  2779. mgid->raw[4] = 0;
  2780. mgid->raw[5] = 0;
  2781. mgid->raw[6] = 0;
  2782. mgid->raw[7] = 0;
  2783. mgid->raw[8] = 0;
  2784. mgid->raw[9] = 0;
  2785. mgid->raw[10] = 0xff;
  2786. mgid->raw[11] = 0xff;
  2787. *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
  2788. }
  2789. }
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}

	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);

	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
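
/*
 * rdma_join_multicast - join the multicast group for @addr and report the
 * result asynchronously through the id's event handler. The join path is
 * chosen by the link layer of the bound port: the SA join on InfiniBand,
 * the locally synthesized IBoE join on Ethernet.
 */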
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
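
/*
 * rdma_leave_multicast - leave the group previously joined for @addr: detach
 * the QP if one is attached, then drop the SA membership (InfiniBand) or the
 * IBoE reference (Ethernet) and free the tracking structure.
 */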
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
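
/*
 * A bonding failover changed the hardware address behind @ndev. If this id
 * is bound to that interface and its cached source MAC no longer matches,
 * queue a work item that reports RDMA_CM_EVENT_ADDR_CHANGE to the user.
 */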
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
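
/*
 * Netdevice notifier: only bonding failover events on a bonding master in
 * the initial network namespace are of interest; every id on every cma
 * device is then checked for an address change.
 */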
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
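
/*
 * IB client add callback: start tracking the new device and create listeners
 * on it for every id that is listening on the wildcard address.
 */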
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
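
/*
 * Force an id off a device that is going away: move it to the DEVICE_REMOVAL
 * state, cancel anything in flight, and let the user's event handler decide
 * (via its return value) whether the id should also be destroyed.
 */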
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
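
/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS. cb->args[0] and
 * cb->args[1] record the device and id positions reached on the previous
 * pass so that the dump can resume where it left off.
 */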
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
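
/*
 * Module init: create the single-threaded rdma_cm workqueue, register with
 * the SA, address resolution and netdevice notifier layers, register as an
 * IB client, and finally hook up the netlink stats callback.
 */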
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);

	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);