cma.c

/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version;	/* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
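
/*
 * State helpers: cma_comp() tests the current state, cma_comp_exch()
 * moves the id from @comp to @exch only if it is currently in @comp,
 * and cma_exch() swaps in @exch unconditionally and returns the prior
 * state.  All three serialize on id_priv->lock.
 */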
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
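
/*
 * cma_acquire_dev() - bind the id to the cma_device whose cached GID
 * table contains the source GID in dev_addr, then resolve the qkey for
 * UD port spaces.  Callers hold the global 'lock' to protect the
 * dev_list walk.
 */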
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
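
/*
 * cma_disable_callback() - enter a callback only if the id is still in
 * @state.  On success the handler_mutex is left held; the callback path
 * must drop it before returning.
 */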
static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
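
/*
 * rdma_create_id() - allocate an rdma_cm_id in the CMA_IDLE state,
 * holding one reference that rdma_destroy_id() later drops.  seq_num
 * is randomized and later used as the receive queue PSN in
 * rdma_init_qp_attr().
 */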
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
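
/*
 * UD QPs are not driven by a CM handshake, so cma_init_ud_qp() walks
 * the QP through INIT -> RTR -> RTS immediately; connected QPs are
 * only moved to INIT here and reach RTR/RTS as the connection is
 * established.
 */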
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
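
/*
 * The cma_modify_qp_*() helpers drive a CM-managed QP into the RTR,
 * RTS, or ERR state under qp_mutex.  If no QP was attached to the id
 * (id.qp == NULL), they succeed as no-ops.
 */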
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
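
/*
 * cma_get_net_info() - extract the IP version, port, and source and
 * destination addresses from the private-data header carried in a CM
 * request.  SDP uses its own header layout (struct sdp_hh); all other
 * port spaces use struct cma_hdr.
 */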
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}
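
/*
 * rdma_destroy_id() - tear down an id: move it to CMA_DESTROYING,
 * cancel any outstanding query or wildcard listens, destroy the
 * underlying IB/iWARP CM id, leave multicast groups, release the bound
 * port, and wait for all references to drop before freeing.
 */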
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
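
/*
 * cma_ib_handler() - translate IB CM events on a connecting or
 * connected id into rdma_cm events.  When the user's event handler
 * returns non-zero, the id is destroyed here; cm_id.ib is cleared
 * first so rdma_destroy_id() does not destroy the CM id a second time.
 */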
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
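
/*
 * cma_new_conn_id()/cma_new_udp_id() - build the child id that is
 * handed to the listener for an incoming request, copying addressing
 * from the request's private-data header; cma_new_conn_id()
 * additionally records the primary (and any alternate) path.  The
 * child starts in the CMA_CONNECT state.
 */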
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto destroy_id;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
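
/*
 * cma_req_handler() - handle an incoming REQ (or SIDR REQ for UD port
 * spaces) on a listening id: build a child id, bind it to a device,
 * and report a CONNECT_REQUEST event.  If the handler accepts, an MRA
 * is sent so the peer keeps waiting while the user processes the
 * request.
 */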
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
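
/*
 * The IB service ID encodes the port space in the bits above a 16-bit
 * port number.  As a sketch (not normative): RDMA_PS_TCP with port
 * 5000 yields cpu_to_be64(((u64) RDMA_PS_TCP << 16) + 5000).
 */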
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
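
/*
 * cma_iw_handler() - translate iWARP CM events into rdma_cm events,
 * mapping the connect-reply status: 0 -> ESTABLISHED, -ECONNRESET or
 * -ECONNREFUSED -> REJECTED, -ETIMEDOUT -> UNREACHABLE, anything else
 * -> CONNECT_ERROR.
 */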
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_storage addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.ss_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
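
/*
 * rdma_listen() - start listening on a bound id.  An id still in
 * CMA_IDLE is first bound to the IPv4 wildcard address.  An id bound
 * to a specific device listens through that device's CM; a wildcard
 * listen is replicated across every registered device via
 * cma_listen_on_all().
 */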
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
  1326. static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
  1327. void *context)
  1328. {
  1329. struct cma_work *work = context;
  1330. struct rdma_route *route;
  1331. route = &work->id->id.route;
  1332. if (!status) {
  1333. route->num_paths = 1;
  1334. *route->path_rec = *path_rec;
  1335. } else {
  1336. work->old_state = CMA_ROUTE_QUERY;
  1337. work->new_state = CMA_ADDR_RESOLVED;
  1338. work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
  1339. work->event.status = status;
  1340. }
  1341. queue_work(cma_wq, &work->work);
  1342. }
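
/*
 * Issue an SA path record query for the IB route.  The service type is
 * carried in the qos_class field for IPv4 sources and in the traffic
 * class bits of the IPv6 flow label otherwise.  Completion is reported
 * to cma_query_handler(), which queues the prepared cma_work.
 */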
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
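
/*
 * Generic deferred-event handler: perform the prepared state transition
 * and deliver the queued event from workqueue context.  A non-zero
 * return from the user's event handler destroys the id; the reference
 * taken when the work was queued is dropped here either way.
 */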
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
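
/*
 * Resolve an IB route: allocate a single path record and kick off the
 * SA query.  The result is delivered asynchronously through the work
 * handler as RDMA_CM_EVENT_ROUTE_RESOLVED or, on query failure,
 * RDMA_CM_EVENT_ROUTE_ERROR.
 */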
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
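
/*
 * Resolve routing information for the bound address pair.  The id must
 * be in the address-resolved state; a reference is held across the
 * asynchronous query and dropped by the work handler.  iWARP needs no
 * path lookup, so its "resolution" just queues the completion event.
 */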
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
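
/*
 * Bind to a device for loopback communication: pick the first active
 * port on any registered device (falling back to the first device's
 * port 1) and adopt its GID and pkey as the source addressing.
 */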
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
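
/*
 * Completion callback for rdma_resolve_ip().  The global lock is held
 * while acquiring the device so rdma_destroy_id() cannot remove it
 * concurrently; on failure the id falls back to the bound state and
 * RDMA_CM_EVENT_ADDR_ERROR is reported.
 */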
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}
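
/*
 * Resolve a destination address to an RDMA device.  Idle ids are bound
 * first; wildcard destinations resolve over loopback, anything else
 * goes through the IP address translation service.  Completion arrives
 * asynchronously via addr_handler() or the queued loopback work.
 */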
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
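
/*
 * Port-space management: rdma_bind_list instances live in per-port-space
 * idrs keyed by port number, with every id sharing a port chained on the
 * bind list's owners hlist.  cma_alloc_port() claims one specific port.
 */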
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
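
/*
 * Claim an ephemeral port, scanning upward from next_port and wrapping
 * once to the bottom of the local port range before giving up with
 * -EADDRNOTAVAIL.
 */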
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization per like inet_csk_get_port */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
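
/*
 * Bind the id to a local IPv4 or IPv6 address.  Binding to a specific
 * address also translates it to a device address and attaches the
 * owning device; the port is then reserved in the id's port space.
 */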
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
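
/*
 * Build the private-data header carried in the connect request: an SDP
 * hello header for RDMA_PS_SDP, otherwise the CMA header, in the IPv4
 * or IPv6 layout matching the bound addresses.
 */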
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}
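
/*
 * IB CM callback for SIDR (UD service ID resolution) replies.  A
 * successful reply must carry the qkey we expect; the remote QPN, qkey,
 * and an address handle built from the resolved path are passed up in
 * the ESTABLISHED event.
 */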
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
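
/*
 * Issue a SIDR request for UD port spaces: prepend the CMA header to the
 * user's private data and ask the IB CM to resolve the service ID to a
 * remote QPN/qkey, completing in cma_sidr_rep_handler().
 */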
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
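
/*
 * Send an IB CM connection request for an RC connection.  The CMA/SDP
 * header is prepended to the user's private data, and the primary (and
 * optional alternate) resolved paths are handed to the CM along with
 * the negotiated QP and retry parameters.
 */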
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
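
/*
 * Initiate an active connection (or, for UD port spaces, a SIDR
 * resolution) on a route-resolved id.  A typical active-side caller
 * sequence, sketched for illustration only (error handling and the
 * event loop are omitted):
 *
 *	rdma_resolve_addr(id, NULL, dst_addr, timeout_ms);
 *	...wait for RDMA_CM_EVENT_ADDR_RESOLVED...
 *	rdma_resolve_route(id, timeout_ms);
 *	...wait for RDMA_CM_EVENT_ROUTE_RESOLVED...
 *	rdma_connect(id, &conn_param);
 *	...wait for RDMA_CM_EVENT_ESTABLISHED...
 */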
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
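
/*
 * Accept a connection request on a passive id.  UD port spaces answer
 * with a SIDR reply; IB RC connections transition the QP and send a CM
 * REP; iWARP goes through iw_cm_accept().  Any failure rejects the
 * request and moves the QP to the error state.
 */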
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
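
/*
 * Tear down a connection: move the QP to the error state, then exchange
 * DREQ/DREP on IB or issue an iWARP disconnect.
 */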
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
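
/*
 * SA multicast join callback: on success, attach any bound QP to the
 * group and report an MGID-derived address handle, the multicast QPN
 * (0xFFFFFF), and the group qkey in the MULTICAST_JOIN event.
 */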
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
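
/*
 * Join a multicast group on behalf of the id.  The request is tracked
 * on mc_list so rdma_leave_multicast() and id destruction can cancel
 * it; only IB transports support multicast here.
 */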
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
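
/*
 * Netdevice notifier: on a bonding failover, any id whose cached source
 * device address no longer matches the netdevice is sent an
 * asynchronous RDMA_CM_EVENT_ADDR_CHANGE so consumers can re-resolve.
 */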
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->src_dev == ndev) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}
out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
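
/*
 * Device client hooks: when a new RDMA device registers, replicate all
 * wildcard listens onto it; on removal, every id attached to the device
 * is notified (or, for internal ids, destroyed directly) before the
 * device reference is released.
 */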
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
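
/*
 * Module init: seed the ephemeral port iterator within the local port
 * range, create the single-threaded workqueue that serializes CMA work
 * items, and register with the SA, address-resolution, netdevice, and
 * device-client layers, unwinding in reverse order on failure.
 */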
static int cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);